From: Jeffle Xu <jefflexu@linux.alibaba.com>
anolis inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB5UKT
Reference: https://gitee.com/anolis/cloud-kernel/commit/29a07ee10787
--------------------------------
ANBZ: #1666
commit c8383054506c77b814489c09877b5db83fd4abf2 upstream.
Fscache/CacheFiles has so far served as a local cache for a remote networking fs. A new on-demand read mode will be introduced for CacheFiles, which can benefit scenarios where on-demand read semantics are needed, e.g. container image distribution.
The essential difference between these two modes lies in how a cache miss is handled: in the original mode, the netfs fetches the data from the remote server and then writes it to the cache file; in on-demand read mode, fetching the data and writing it into the cache are delegated to a user daemon.
As the first step, notify the user daemon when looking up a cookie. In this case, an anonymous fd is sent to the user daemon, through which the user daemon can write the fetched data to the cache file. Since the user daemon may move the anonymous fd around, e.g. through dup(), an object ID uniquely identifying the cache file is also attached.
Also add one advisory flag (FSCACHE_ADV_WANT_CACHE_SIZE) suggesting that the cache file size shall be retrieved at runtime. This helps the scenario where one cache file contains multiple netfs files, e.g. for the purpose of deduplication. In this case, the netfs itself has no idea of the size of the cache file, whilst the user daemon should give the hint on it.
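For illustration, the daemon side of the OPEN protocol could look roughly as follows. This is a minimal sketch only: it assumes the cache has already been bound in on-demand mode (wired up later in this series), error handling is omitted, and lookup_object_size() is a hypothetical netfs-specific helper that is not part of this patch:

    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/cachefiles.h>

    /* Hypothetical, netfs-specific: map the keys in an OPEN payload to
     * the object's size in the backing source. */
    extern long long lookup_object_size(struct cachefiles_open *load);

    static void handle_requests(int devfd)
    {
            char buf[CACHEFILES_MSG_MAX_SIZE];
            struct pollfd pfd = { .fd = devfd, .events = POLLIN };

            for (;;) {
                    /* EPOLLIN is raised while unprocessed requests are queued */
                    poll(&pfd, 1, -1);

                    /* one read() returns at most one pending request */
                    ssize_t n = read(devfd, buf, sizeof(buf));
                    if (n < (ssize_t)sizeof(struct cachefiles_msg))
                            continue;

                    struct cachefiles_msg *msg = (struct cachefiles_msg *)buf;
                    if (msg->opcode != CACHEFILES_OP_OPEN)
                            continue;

                    /* remember (msg->object_id, anon fd) for later writes,
                     * then complete the OPEN request: "copen <msg_id>,<size>" */
                    char reply[64];
                    int len = snprintf(reply, sizeof(reply), "copen %u,%lld",
                                       msg->msg_id, lookup_object_size(
                                       (struct cachefiles_open *)msg->data));
                    write(devfd, reply, len);
            }
    }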
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220509074028.74954-3-jefflexu@linux.alibaba.com
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Signed-off-by: Huang Jianan <jnhuang@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Baokun Li <libaokun1@huawei.com>
---
 fs/cachefiles/Kconfig             |  12 +
 fs/cachefiles/Makefile            |   1 +
 fs/cachefiles/daemon.c            |  87 ++++++-
 fs/cachefiles/internal.h          |  51 ++++
 fs/cachefiles/namei.c             |  10 +
 fs/cachefiles/ondemand.c          | 388 ++++++++++++++++++++++++++++++
 include/trace/events/cachefiles.h |   2 +
 include/uapi/linux/cachefiles.h   |  68 ++++++
 lib/radix-tree.c                  |   1 +
 9 files changed, 607 insertions(+), 13 deletions(-)
 create mode 100644 fs/cachefiles/ondemand.c
 create mode 100644 include/uapi/linux/cachefiles.h
diff --git a/fs/cachefiles/Kconfig b/fs/cachefiles/Kconfig
index ff9ca55a9ae9..12174e2616f8 100644
--- a/fs/cachefiles/Kconfig
+++ b/fs/cachefiles/Kconfig
@@ -38,3 +38,15 @@ config CACHEFILES_HISTOGRAM
 	  See Documentation/filesystems/caching/cachefiles.rst for more
 	  information.
+
+config CACHEFILES_ONDEMAND
+	bool "Support for on-demand read"
+	depends on CACHEFILES
+	default n
+	help
+	  This permits userspace to enable the cachefiles on-demand read
+	  mode. In this mode, when a cache miss occurs, responsibility for
+	  fetching the data lies with the cachefiles backend instead of
+	  with the netfs and is delegated to userspace.
+
+	  If unsure, say N.
diff --git a/fs/cachefiles/Makefile b/fs/cachefiles/Makefile
index 891dedda5905..c247d8b5e4f9 100644
--- a/fs/cachefiles/Makefile
+++ b/fs/cachefiles/Makefile
@@ -15,5 +15,6 @@ cachefiles-y := \
 	xattr.o
 cachefiles-$(CONFIG_CACHEFILES_HISTOGRAM) += proc.o
+cachefiles-$(CONFIG_CACHEFILES_ONDEMAND) += ondemand.o
 obj-$(CONFIG_CACHEFILES) := cachefiles.o
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 752c1e43416f..1c85c8dcc0c4 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -73,6 +73,9 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
 	{ "inuse",	cachefiles_daemon_inuse		},
 	{ "secctx",	cachefiles_daemon_secctx	},
 	{ "tag",	cachefiles_daemon_tag		},
+#ifdef CONFIG_CACHEFILES_ONDEMAND
+	{ "copen",	cachefiles_ondemand_copen	},
+#endif
 	{ "",		NULL				}
 };
@@ -106,6 +109,9 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
 	rwlock_init(&cache->active_lock);
 	init_waitqueue_head(&cache->daemon_pollwq);
 
+	INIT_RADIX_TREE(&cache->reqs, GFP_ATOMIC);
+	idr_init(&cache->ondemand_ids);
+
 	/* set default caching limits
 	 * - limit at 1% free space and/or free files
 	 * - cull below 5% free space and/or free files
@@ -123,6 +129,44 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
 	return 0;
 }
+
+static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+{
+	void **slot;
+	struct radix_tree_iter iter;
+	struct cachefiles_req *req;
+
+	/*
+	 * Make sure the following two operations won't be reordered.
+	 *   1) set CACHEFILES_DEAD bit
+	 *   2) flush requests in the xarray
+	 * Otherwise the request may be enqueued after xarray has been
+	 * flushed, leaving the orphan request never being completed.
+	 *
+	 * CPU 1			CPU 2
+	 * =====			=====
+	 * flush requests in the xarray
+	 *				test CACHEFILES_DEAD bit
+	 *				enqueue the request
+	 * set CACHEFILES_DEAD bit
+	 */
+	smp_mb();
+
+	xa_lock(&cache->reqs);
+	radix_tree_for_each_slot(slot, &cache->reqs, &iter, 0) {
+		req = radix_tree_deref_slot_protected(slot,
+						      &cache->reqs.xa_lock);
+		BUG_ON(!req);
+		radix_tree_delete(&cache->reqs, iter.index);
+		req->error = -EIO;
+		complete(&req->done);
+	}
+	xa_unlock(&cache->reqs);
+
+	xa_lock(&cache->ondemand_ids.idr_rt);
+	idr_destroy(&cache->ondemand_ids);
+	xa_unlock(&cache->ondemand_ids.idr_rt);
+}
+
 /*
  * release a cache
  */
@@ -136,6 +180,8 @@ static int cachefiles_daemon_release(struct inode *inode, struct file *file)
 	set_bit(CACHEFILES_DEAD, &cache->flags);
 
+	if (cachefiles_in_ondemand_mode(cache))
+		cachefiles_flush_reqs(cache);
 	cachefiles_daemon_unbind(cache);
 
 	ASSERT(!cache->active_nodes.rb_node);
@@ -151,23 +197,14 @@ static int cachefiles_daemon_release(struct inode *inode, struct file *file)
 	return 0;
 }
-/*
- * read the cache state
- */
-static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
-				      size_t buflen, loff_t *pos)
+static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
+		char __user *_buffer, size_t buflen, loff_t *pos)
 {
-	struct cachefiles_cache *cache = file->private_data;
 	unsigned long long b_released;
 	unsigned f_released;
 	char buffer[256];
 	int n;
 
-	//_enter(",,%zu,", buflen);
-
-	if (!test_bit(CACHEFILES_READY, &cache->flags))
-		return 0;
-
 	/* check how much space the cache has */
 	cachefiles_has_space(cache, 0, 0);
@@ -205,6 +242,25 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
 	return n;
 }
+/*
+ * read the cache state
+ */
+static ssize_t cachefiles_daemon_read(struct file *file,
+		char __user *_buffer, size_t buflen, loff_t *pos)
+{
+	struct cachefiles_cache *cache = file->private_data;
+
+	//_enter(",,%zu,", buflen);
+
+	if (!test_bit(CACHEFILES_READY, &cache->flags))
+		return 0;
+
+	if (cachefiles_in_ondemand_mode(cache))
+		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen, pos);
+	else
+		return cachefiles_do_daemon_read(cache, _buffer, buflen, pos);
+}
+
 /*
  * command the cache
  */
@@ -296,8 +352,13 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
 	poll_wait(file, &cache->daemon_pollwq, poll);
 	mask = 0;
 
-	if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
-		mask |= EPOLLIN;
+	if (cachefiles_in_ondemand_mode(cache)) {
+		if (!radix_tree_empty(&cache->reqs))
+			mask |= EPOLLIN;
+	} else {
+		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
+			mask |= EPOLLIN;
+	}
 
 	if (test_bit(CACHEFILES_CULLING, &cache->flags))
 		mask |= EPOLLOUT;
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index cf9bd6401c2d..fed71ad90808 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -18,6 +18,8 @@
 #include <linux/cred.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
+#include <linux/cachefiles.h>
+#include <linux/idr.h>
 
 struct cachefiles_cache;
 struct cachefiles_object;
@@ -45,10 +47,15 @@ struct cachefiles_object {
 	uint8_t				new;		/* T if object new */
 	spinlock_t			work_lock;
 	struct rb_node			active_node;	/* link in active tree (dentry is key) */
+#ifdef CONFIG_CACHEFILES_ONDEMAND
+	int				ondemand_id;
+#endif
 };
 
 extern struct kmem_cache *cachefiles_object_jar;
 
+#define CACHEFILES_ONDEMAND_ID_CLOSED	-1
+
 /*
  * Cache files cache definition
  */
@@ -84,11 +91,30 @@ struct cachefiles_cache {
 #define CACHEFILES_DEAD		1	/* T if cache dead */
 #define CACHEFILES_CULLING	2	/* T if cull engaged */
 #define CACHEFILES_STATE_CHANGED 3	/* T if state changed (poll trigger) */
+#define CACHEFILES_ONDEMAND_MODE 4	/* T if in on-demand read mode */
 	char				*rootdirname;	/* name of cache root directory */
 	char				*secctx;	/* LSM security context */
 	char				*tag;		/* cache binding tag */
+	struct radix_tree_root		reqs;		/* xarray of pending on-demand requests */
+	struct idr			ondemand_ids;	/* xarray for ondemand_id allocation */
+	u32				ondemand_id_next;
 };
 
+static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+{
+	return IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND) &&
+	       test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
+}
+
+struct cachefiles_req {
+	struct cachefiles_object *object;
+	struct completion done;
+	int error;
+	struct cachefiles_msg msg;
+};
+
+#define CACHEFILES_REQ_NEW	0
+
 /*
  * backing file read tracking
  */
@@ -217,6 +243,31 @@ extern int cachefiles_allocate_pages(struct fscache_retrieval *,
 extern int cachefiles_write_page(struct fscache_storage *, struct page *);
 extern void cachefiles_uncache_page(struct fscache_object *, struct page *);
 
+/*
+ * ondemand.c
+ */
+#ifdef CONFIG_CACHEFILES_ONDEMAND
+extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+		char __user *_buffer, size_t buflen, loff_t *pos);
+
+extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache,
+				     char *args);
+
+extern int cachefiles_ondemand_init_object(struct cachefiles_object *object);
+
+#else
+static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+		char __user *_buffer, size_t buflen, loff_t *pos)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+{
+	return 0;
+}
+#endif
+
 /*
  * security.c
  */
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index ecc8ecbbfa5a..22a409669fd0 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -592,6 +592,10 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
 	if (ret < 0)
 		goto no_space_error;
 
+	ret = cachefiles_ondemand_init_object(object);
+	if (ret < 0)
+		goto create_error;
+
 	path.dentry = dir;
 	ret = security_path_mknod(&path, next, S_IFREG, 0);
 	if (ret < 0)
@@ -636,6 +640,12 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
 	if (!object->new) {
 		_debug("validate '%pd'", next);
 
+		ret = cachefiles_ondemand_init_object(object);
+		if (ret < 0) {
+			object->dentry = NULL;
+			goto error;
+		}
+
 		ret = cachefiles_check_object_xattr(object, auxdata);
 		if (ret == -ESTALE) {
 			/* delete the object (the deleter drops the directory
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
new file mode 100644
index 000000000000..ff2f00cfa5d4
--- /dev/null
+++ b/fs/cachefiles/ondemand.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/uio.h>
+#include "internal.h"
+
+static int cachefiles_ondemand_fd_release(struct inode *inode,
+					  struct file *file)
+{
+	struct cachefiles_object *object = file->private_data;
+	int object_id = object->ondemand_id;
+	struct cachefiles_cache *cache;
+
+	cache = container_of(object->fscache.cache,
+			     struct cachefiles_cache, cache);
+
+	object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+	xa_lock(&cache->ondemand_ids.idr_rt);
+	idr_remove(&cache->ondemand_ids, object_id);
+	xa_unlock(&cache->ondemand_ids.idr_rt);
+	object->fscache.cache->ops->put_object(&object->fscache,
+					       cachefiles_obj_put_ondemand_fd);
+	return 0;
+}
+
+static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
+						 struct iov_iter *iter)
+{
+	struct cachefiles_object *object = kiocb->ki_filp->private_data;
+	struct cachefiles_cache *cache;
+	size_t len = iter->count;
+	loff_t pos = kiocb->ki_pos;
+	struct path path;
+	struct file *file;
+	int ret;
+
+	if (!object->backer)
+		return -ENOBUFS;
+
+	cache = container_of(object->fscache.cache,
+			     struct cachefiles_cache, cache);
+
+	/* write data to the backing filesystem and let it store it in its
+	 * own time */
+	path.mnt = cache->mnt;
+	path.dentry = object->backer;
+	file = dentry_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
+			   cache->cache_cred);
+	if (IS_ERR(file))
+		return -ENOBUFS;
+
+	ret = vfs_iter_write(file, iter, &pos, 0);
+	fput(file);
+	if (ret != len)
+		return -EIO;
+	return len;
+}
+
+static const struct file_operations cachefiles_ondemand_fd_fops = {
+	.owner		= THIS_MODULE,
+	.release	= cachefiles_ondemand_fd_release,
+	.write_iter	= cachefiles_ondemand_fd_write_iter,
+};
+
+/*
+ * OPEN request Completion (copen)
+ * - command: "copen <id>,<cache_size>"
+ *   <cache_size> indicates the object size if >=0, error code if negative
+ */
+int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+{
+	struct cachefiles_req *req;
+	struct fscache_cookie *cookie;
+	char *pid, *psize;
+	unsigned long id;
+	long size;
+	int ret;
+
+	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+		return -EOPNOTSUPP;
+
+	if (!*args) {
+		pr_err("Empty id specified\n");
+		return -EINVAL;
+	}
+
+	pid = args;
+	psize = strchr(args, ',');
+	if (!psize) {
+		pr_err("Cache size is not specified\n");
+		return -EINVAL;
+	}
+
+	*psize = 0;
+	psize++;
+
+	ret = kstrtoul(pid, 0, &id);
+	if (ret)
+		return ret;
+
+	xa_lock(&cache->reqs);
+	req = radix_tree_delete(&cache->reqs, id);
+	xa_unlock(&cache->reqs);
+	if (!req)
+		return -EINVAL;
+
+	/* fail OPEN request if copen format is invalid */
+	ret = kstrtol(psize, 0, &size);
+	if (ret) {
+		req->error = ret;
+		goto out;
+	}
+
+	/* fail OPEN request if daemon reports an error */
+	if (size < 0) {
+		if (!IS_ERR_VALUE(size))
+			size = -EINVAL;
+		req->error = size;
+		goto out;
+	}
+
+	cookie = req->object->fscache.cookie;
+	fscache_set_store_limit(&req->object->fscache, size);
+	if (size)
+		clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+	else
+		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+out:
+	complete(&req->done);
+	return ret;
+}
+
+static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+{
+	struct cachefiles_object *object = req->object;
+	struct cachefiles_cache *cache;
+	struct cachefiles_open *load;
+	struct file *file;
+	u32 object_id;
+	int ret, fd;
+
+	ret = object->fscache.cache->ops->grab_object(&object->fscache,
+			cachefiles_obj_get_ondemand_fd) ? 0 : -EAGAIN;
+	if (ret)
+		return ret;
+
+	cache = container_of(object->fscache.cache,
+			     struct cachefiles_cache, cache);
+	idr_preload(GFP_KERNEL);
+	xa_lock(&cache->ondemand_ids.idr_rt);
+	ret = idr_alloc_cyclic(&cache->ondemand_ids, NULL,
+			       1, INT_MAX, GFP_ATOMIC);
+	xa_unlock(&cache->ondemand_ids.idr_rt);
+	idr_preload_end();
+	if (ret < 0)
+		goto err;
+	object_id = ret;
+
+	fd = get_unused_fd_flags(O_WRONLY);
+	if (fd < 0) {
+		ret = fd;
+		goto err_free_id;
+	}
+
+	file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
+				  object, O_WRONLY);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		goto err_put_fd;
+	}
+
+	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
+	fd_install(fd, file);
+
+	load = (void *)req->msg.data;
+	load->fd = fd;
+	req->msg.object_id = object_id;
+	object->ondemand_id = object_id;
+	return 0;
+
+err_put_fd:
+	put_unused_fd(fd);
+err_free_id:
+	xa_lock(&cache->ondemand_ids.idr_rt);
+	idr_remove(&cache->ondemand_ids, object_id);
+	xa_unlock(&cache->ondemand_ids.idr_rt);
+err:
+	object->fscache.cache->ops->put_object(&object->fscache,
+					       cachefiles_obj_put_ondemand_fd);
+	return ret;
+}
+
+ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+		char __user *_buffer, size_t buflen, loff_t *pos)
+{
+	struct cachefiles_req *req;
+	struct cachefiles_msg *msg;
+	unsigned long id = 0;
+	size_t n;
+	int ret = 0;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	/*
+	 * Search for a request that has not ever been processed, to prevent
+	 * requests from being processed repeatedly.
+	 */
+	xa_lock(&cache->reqs);
+	radix_tree_for_each_tagged(slot, &cache->reqs, &iter, 0,
+				   CACHEFILES_REQ_NEW) {
+		req = radix_tree_deref_slot_protected(slot,
+						      &cache->reqs.xa_lock);
+
+		msg = &req->msg;
+		n = msg->len;
+
+		if (n > buflen) {
+			xa_unlock(&cache->reqs);
+			return -EMSGSIZE;
+		}
+
+		radix_tree_iter_tag_clear(&cache->reqs, &iter,
+					  CACHEFILES_REQ_NEW);
+		xa_unlock(&cache->reqs);
+
+		id = iter.index;
+		msg->msg_id = id;
+
+		if (msg->opcode == CACHEFILES_OP_OPEN) {
+			ret = cachefiles_ondemand_get_fd(req);
+			if (ret)
+				goto error;
+		}
+
+		if (copy_to_user(_buffer, msg, n) != 0) {
+			ret = -EFAULT;
+			goto err_put_fd;
+		}
+		return n;
+	}
+	xa_unlock(&cache->reqs);
+	return 0;
+
+err_put_fd:
+	if (msg->opcode == CACHEFILES_OP_OPEN)
+		__close_fd(current->files,
+			   ((struct cachefiles_open *)msg->data)->fd);
+error:
+	xa_lock(&cache->reqs);
+	radix_tree_delete(&cache->reqs, id);
+	xa_unlock(&cache->reqs);
+	req->error = ret;
+	complete(&req->done);
+	return ret;
+}
+
+typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
+
+static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+					enum cachefiles_opcode opcode,
+					size_t data_len,
+					init_req_fn init_req,
+					void *private)
+{
+	static atomic64_t global_index = ATOMIC64_INIT(0);
+	struct cachefiles_cache *cache;
+	struct cachefiles_req *req;
+	long id;
+	int ret;
+
+	cache = container_of(object->fscache.cache,
+			     struct cachefiles_cache, cache);
+
+	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+		return 0;
+
+	if (test_bit(CACHEFILES_DEAD, &cache->flags))
+		return -EIO;
+
+	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->object = object;
+	init_completion(&req->done);
+	req->msg.opcode = opcode;
+	req->msg.len = sizeof(struct cachefiles_msg) + data_len;
+
+	ret = init_req(req, private);
+	if (ret)
+		goto out;
+
+	/*
+	 * Stop enqueuing the request when daemon is dying. The
+	 * following two operations need to be atomic as a whole.
+	 *   1) check cache state, and
+	 *   2) enqueue request if cache is alive.
+	 * Otherwise the request may be enqueued after xarray has been
+	 * flushed, leaving the orphan request never being completed.
+	 *
+	 * CPU 1			CPU 2
+	 * =====			=====
+	 * test CACHEFILES_DEAD bit
+	 *				set CACHEFILES_DEAD bit
+	 *				flush requests in the xarray
+	 * enqueue the request
+	 */
+	xa_lock(&cache->reqs);
+
+	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+		xa_unlock(&cache->reqs);
+		ret = -EIO;
+		goto out;
+	}
+
+	/* coupled with the barrier in cachefiles_flush_reqs() */
+	smp_mb();
+
+	while (radix_tree_insert(&cache->reqs,
+				 id = atomic64_read(&global_index), req))
+		atomic64_inc(&global_index);
+
+	radix_tree_tag_set(&cache->reqs, id, CACHEFILES_REQ_NEW);
+	xa_unlock(&cache->reqs);
+
+	wake_up_all(&cache->daemon_pollwq);
+	wait_for_completion(&req->done);
+	ret = req->error;
+out:
+	kfree(req);
+	return ret;
+}
+
+static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
+					     void *private)
+{
+	struct cachefiles_object *object = req->object;
+	struct fscache_cookie *cookie = object->fscache.cookie;
+	struct fscache_cookie *volume;
+	struct cachefiles_open *load = (void *)req->msg.data;
+	size_t volume_key_size, cookie_key_size;
+	char *cookie_key, *volume_key;
+
+	/* Cookie key is binary data, which is netfs specific. */
+	cookie_key_size = cookie->key_len;
+	if (cookie->key_len <= sizeof(cookie->inline_key))
+		cookie_key = cookie->inline_key;
+	else
+		cookie_key = cookie->key;
+
+	volume = object->fscache.parent->cookie;
+	volume_key_size = volume->key_len + 1;
+	if (volume_key_size <= sizeof(cookie->inline_key))
+		volume_key = volume->inline_key;
+	else
+		volume_key = volume->key;
+
+	load->volume_key_size = volume_key_size;
+	load->cookie_key_size = cookie_key_size;
+	memcpy(load->data, volume_key, volume->key_len);
+	load->data[volume_key_size - 1] = '\0';
+	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);
+	return 0;
+}
+
+int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+{
+	struct fscache_cookie *cookie = object->fscache.cookie;
+	size_t volume_key_size, cookie_key_size, data_len;
+
+	/*
+	 * CacheFiles will firstly check the cache file under the root cache
+	 * directory. If the coherency check failed, it will fallback to
+	 * creating a new tmpfile as the cache file. Reuse the previously
+	 * allocated object ID if any.
+	 */
+	if (object->ondemand_id > 0 || object->type == FSCACHE_COOKIE_TYPE_INDEX)
+		return 0;
+
+	volume_key_size = object->fscache.parent->cookie->key_len + 1;
+	cookie_key_size = cookie->key_len;
+	data_len = sizeof(struct cachefiles_open) +
+		   volume_key_size + cookie_key_size;
+
+	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
+			data_len, cachefiles_ondemand_init_open_req, NULL);
+}
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index 5d9de24cb9c0..d09e369e9d1e 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -21,6 +21,8 @@ enum cachefiles_obj_ref_trace {
 	cachefiles_obj_put_wait_retry = fscache_obj_ref__nr_traces,
 	cachefiles_obj_put_wait_timeo,
+	cachefiles_obj_get_ondemand_fd,
+	cachefiles_obj_put_ondemand_fd,
 	cachefiles_obj_ref__nr_traces
 };
diff --git a/include/uapi/linux/cachefiles.h b/include/uapi/linux/cachefiles.h
new file mode 100644
index 000000000000..78caa73e5343
--- /dev/null
+++ b/include/uapi/linux/cachefiles.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_CACHEFILES_H
+#define _LINUX_CACHEFILES_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Fscache ensures that the maximum length of cookie key is 255. The volume key
+ * is controlled by netfs, and generally no bigger than 255.
+ */
+#define CACHEFILES_MSG_MAX_SIZE	1024
+
+enum cachefiles_opcode {
+	CACHEFILES_OP_OPEN,
+	CACHEFILES_OP_CLOSE,
+	CACHEFILES_OP_READ,
+};
+
+/*
+ * Message Header
+ *
+ * @msg_id	a unique ID identifying this message
+ * @opcode	message type, CACHEFILES_OP_*
+ * @len		message length, including message header and following data
+ * @object_id	a unique ID identifying a cache file
+ * @data	message type specific payload
+ */
+struct cachefiles_msg {
+	__u32 msg_id;
+	__u32 opcode;
+	__u32 len;
+	__u32 object_id;
+	__u8  data[];
+};
+
+/*
+ * @data contains the volume_key followed directly by the cookie_key. volume_key
+ * is a NUL-terminated string; @volume_key_size indicates the size of the volume
+ * key in bytes. cookie_key is binary data, which is netfs specific;
+ * @cookie_key_size indicates the size of the cookie key in bytes.
+ *
+ * @fd identifies an anon_fd referring to the cache file.
+ */
+struct cachefiles_open {
+	__u32 volume_key_size;
+	__u32 cookie_key_size;
+	__u32 fd;
+	__u32 flags;
+	__u8  data[];
+};
+
+/*
+ * @off		indicates the starting offset of the requested file range
+ * @len		indicates the length of the requested file range
+ */
+struct cachefiles_read {
+	__u64 off;
+	__u64 len;
+};
+
+/*
+ * Reply for READ request
+ * @arg for this ioctl is the @id field of READ request.
+ */
+#define CACHEFILES_IOC_READ_COMPLETE	_IOW(0x98, 1, int)
+
+#endif
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index cbc691525236..28145bdf6f3f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1059,6 +1059,7 @@ void radix_tree_iter_tag_clear(struct radix_tree_root *root,
 {
 	node_tag_clear(root, iter->node, tag, iter_offset(iter));
 }
+EXPORT_SYMBOL(radix_tree_iter_tag_clear);
 
 /**
  * radix_tree_tag_get - get a tag on a radix tree node
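
For completeness, here is a sketch of how a daemon might consume the OPEN payload defined in the new uapi header and fill the cache through the anonymous fd. fetch_from_source() is a hypothetical netfs-specific helper, not part of this patch; since the kernel-side write path opens the backing file with O_DIRECT, the buffer, offset, and length are kept block-aligned:

    #include <unistd.h>
    #include <linux/cachefiles.h>

    /* Hypothetical, netfs-specific: read `len` bytes at `off` of the
     * object identified by the volume/cookie keys into `buf`. */
    extern ssize_t fetch_from_source(const char *volume_key,
                                     const unsigned char *cookie_key,
                                     size_t cookie_key_size,
                                     char *buf, size_t len, off_t off);

    static void fill_cache(struct cachefiles_msg *msg)
    {
            struct cachefiles_open *load =
                    (struct cachefiles_open *)msg->data;
            /* per cachefiles_ondemand_init_open_req(): a NUL-terminated
             * volume key, directly followed by the binary cookie key */
            const char *volume_key = (const char *)load->data;
            const unsigned char *cookie_key =
                    load->data + load->volume_key_size;
            static char buf[1 << 20] __attribute__((aligned(4096)));
            off_t off = 0;
            ssize_t n;

            while ((n = fetch_from_source(volume_key, cookie_key,
                                          load->cookie_key_size,
                                          buf, sizeof(buf), off)) > 0) {
                    /* the kernel sets FMODE_PWRITE on the anon fd,
                     * so pwrite() is usable on it */
                    if (pwrite(load->fd, buf, n, off) != n)
                            break;
                    off += n;
            }
    }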