Introduce failover mechanism for on-demand mode
Jia Zhu (5):
  cachefiles: introduce object ondemand state
  cachefiles: extract ondemand info field from cachefiles_object
  cachefiles: resend an open request if the read request's object is closed
  cachefiles: narrow the scope of triggering EPOLLIN events in ondemand mode
  cachefiles: add restore command to recover inflight ondemand read requests

 fs/cachefiles/daemon.c    |  15 +++-
 fs/cachefiles/interface.c |   7 +-
 fs/cachefiles/internal.h  |  59 +++++++++++++-
 fs/cachefiles/ondemand.c  | 166 ++++++++++++++++++++++++++++----------
 4 files changed, 201 insertions(+), 46 deletions(-)
From: Jia Zhu <zhujia.zj@bytedance.com>

mainline inclusion
from mainline-v6.8-rc1
commit 357a18d033143617e9c7d420c8f0dd4cbab5f34d
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IA6I1T
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Previously, the @ondemand_id field was used not only to identify the ondemand state of the object, but also to represent the index of the xarray. This commit introduces a dedicated @state field to decouple the object state from @ondemand_id and adds helpers to access it.
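For illustration, the CACHEFILES_OBJECT_STATE_FUNCS() macro added below generates one pair of helpers per state; expanded for the "open" state it is roughly equivalent to the following (a sketch of the macro expansion, not extra code in this patch):

static inline bool
cachefiles_ondemand_object_is_open(const struct cachefiles_object *object)
{
	return object->state == CACHEFILES_ONDEMAND_OBJSTATE_OPEN;
}

static inline void
cachefiles_ondemand_set_object_open(struct cachefiles_object *object)
{
	object->state = CACHEFILES_ONDEMAND_OBJSTATE_OPEN;
}

Callers then test cachefiles_ondemand_object_is_open() instead of comparing object->ondemand_id against zero.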
Signed-off-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20231120041422.75170-2-zhujia.zj@bytedance.com
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 fs/cachefiles/internal.h | 21 +++++++++++++++++++++
 fs/cachefiles/ondemand.c | 21 +++++++++------------
 2 files changed, 30 insertions(+), 12 deletions(-)
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 2ad58c465208..00beedeaec18 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -44,6 +44,11 @@ struct cachefiles_volume {
 	struct dentry *fanout[256];	/* Fanout subdirs */
 };
+enum cachefiles_object_state {
+	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
+	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
+};
+
 /*
  * Backing file state.
  */
@@ -62,6 +67,7 @@ struct cachefiles_object {
 #define CACHEFILES_OBJECT_USING_TMPFILE	0	/* Have an unlinked tmpfile */
 #ifdef CONFIG_CACHEFILES_ONDEMAND
 	int ondemand_id;
+	enum cachefiles_object_state state;
 #endif
 };
@@ -296,6 +302,21 @@ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
 extern int cachefiles_ondemand_read(struct cachefiles_object *object,
 				    loff_t pos, size_t len);
+#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE)	\
+static inline bool						\
+cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
+{								\
+	return object->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+}								\
+								\
+static inline void						\
+cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
+{								\
+	object->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+}
+
+CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
+CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
 #else
 static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 					char __user *_buffer, size_t buflen)
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 0254ed39f68c..90456b8a4b3e 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -15,6 +15,7 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
 	xa_lock(&cache->reqs);
 	object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+	cachefiles_ondemand_set_object_close(object);
 	/*
 	 * Flush all pending READ requests since their completion depends on
@@ -176,6 +177,8 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
 		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
 	trace_cachefiles_ondemand_copen(req->object, id, size);
+	cachefiles_ondemand_set_object_open(req->object);
+
 out:
 	complete(&req->done);
 	return ret;
@@ -363,7 +366,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	/* coupled with the barrier in cachefiles_flush_reqs() */
 	smp_mb();
-	if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
+	if (opcode != CACHEFILES_OP_OPEN &&
+	    !cachefiles_ondemand_object_is_open(object)) {
 		WARN_ON_ONCE(object->ondemand_id == 0);
 		xas_unlock(&xas);
 		ret = -EIO;
@@ -430,18 +434,11 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
 					      void *private)
 {
 	struct cachefiles_object *object = req->object;
-	int object_id = object->ondemand_id;
-	/*
-	 * It's possible that object id is still 0 if the cookie looking up
-	 * phase failed before OPEN request has ever been sent. Also avoid
-	 * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means
-	 * anon_fd has already been closed.
-	 */
-	if (object_id <= 0)
+	if (!cachefiles_ondemand_object_is_open(object))
 		return -ENOENT;
-	req->msg.object_id = object_id;
+	req->msg.object_id = object->ondemand_id;
 	trace_cachefiles_ondemand_close(object, &req->msg);
 	return 0;
 }
@@ -460,7 +457,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
 	int object_id = object->ondemand_id;
 	/* Stop enqueuing requests when daemon has closed anon_fd. */
-	if (object_id <= 0) {
+	if (!cachefiles_ondemand_object_is_open(object)) {
 		WARN_ON_ONCE(object_id == 0);
 		pr_info_once("READ: anonymous fd closed prematurely.\n");
 		return -EIO;
@@ -485,7 +482,7 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
 	 * creating a new tmpfile as the cache file. Reuse the previously
 	 * allocated object ID if any.
 	 */
-	if (object->ondemand_id > 0)
+	if (cachefiles_ondemand_object_is_open(object))
 		return 0;
volume_key_size = volume->key[0] + 1;
From: Jia Zhu <zhujia.zj@bytedance.com>

mainline inclusion
from mainline-v6.8-rc1
commit 3c5ecfe16e7699011c12c2d44e55437415331fa3
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IA6I1T
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
We'll introduce a @work_struct field for @object in a subsequent patch, which would enlarge the size of @object. To avoid that, this commit extracts the ondemand-related fields out of @object into a separately allocated struct cachefiles_ondemand_info.
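The resulting layout looks roughly like this (a sketch condensed from the hunks below, with unrelated fields elided):

struct cachefiles_ondemand_info {
	int				ondemand_id;
	enum cachefiles_object_state	state;
	struct cachefiles_object	*object;	/* back pointer to the owner */
};

struct cachefiles_object {
	/* ... existing fields ... */
#ifdef CONFIG_CACHEFILES_ONDEMAND
	struct cachefiles_ondemand_info	*ondemand;	/* allocated only in ondemand mode */
#endif
};

The info struct is allocated in cachefiles_alloc_object() via cachefiles_ondemand_init_obj_info() and freed in cachefiles_put_object() via cachefiles_ondemand_deinit_obj_info(), so caches not in ondemand mode only pay for a NULL pointer.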
Signed-off-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20231120041422.75170-3-zhujia.zj@bytedance.com
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 fs/cachefiles/interface.c |  7 ++++++-
 fs/cachefiles/internal.h  | 26 ++++++++++++++++++++++----
 fs/cachefiles/ondemand.c  | 34 ++++++++++++++++++++++++++++------
 3 files changed, 56 insertions(+), 11 deletions(-)
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 40052bdb3365..35ba2117a6f6 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -31,6 +31,11 @@ struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie)
 	if (!object)
 		return NULL;
+	if (cachefiles_ondemand_init_obj_info(object, volume)) {
+		kmem_cache_free(cachefiles_object_jar, object);
+		return NULL;
+	}
+
 	refcount_set(&object->ref, 1);
 	spin_lock_init(&object->lock);
@@ -88,7 +93,7 @@ void cachefiles_put_object(struct cachefiles_object *object,
 	ASSERTCMP(object->file, ==, NULL);
 	kfree(object->d_name);
-
+	cachefiles_ondemand_deinit_obj_info(object);
 	cache = object->volume->cache->cache;
 	fscache_put_cookie(object->cookie, fscache_cookie_put_object);
 	object->cookie = NULL;
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 00beedeaec18..b0fe76964bc0 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -49,6 +49,12 @@ enum cachefiles_object_state {
 	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
 };
+struct cachefiles_ondemand_info {
+	int ondemand_id;
+	enum cachefiles_object_state state;
+	struct cachefiles_object *object;
+};
+
 /*
  * Backing file state.
 */
@@ -66,8 +72,7 @@ struct cachefiles_object {
 	unsigned long flags;
 #define CACHEFILES_OBJECT_USING_TMPFILE	0	/* Have an unlinked tmpfile */
 #ifdef CONFIG_CACHEFILES_ONDEMAND
-	int ondemand_id;
-	enum cachefiles_object_state state;
+	struct cachefiles_ondemand_info *ondemand;
 #endif
 };
@@ -302,17 +307,21 @@ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
 extern int cachefiles_ondemand_read(struct cachefiles_object *object,
 				    loff_t pos, size_t len);
+extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
+					     struct cachefiles_volume *volume);
+extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj);
+
 #define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE)	\
 static inline bool						\
 cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
 {								\
-	return object->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+	return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
 }								\
 								\
 static inline void						\
 cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
 {								\
-	object->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
+	object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
 }
 CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
@@ -338,6 +347,15 @@ static inline int cachefiles_ondemand_read(struct cachefiles_object *object,
 {
 	return -EOPNOTSUPP;
 }
+
+static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
+						    struct cachefiles_volume *volume)
+{
+	return 0;
+}
+static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
+{
+}
 #endif
 /*
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 90456b8a4b3e..deb7e3007aa1 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -9,12 +9,13 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
 {
 	struct cachefiles_object *object = file->private_data;
 	struct cachefiles_cache *cache = object->volume->cache;
-	int object_id = object->ondemand_id;
+	struct cachefiles_ondemand_info *info = object->ondemand;
+	int object_id = info->ondemand_id;
 	struct cachefiles_req *req;
 	XA_STATE(xas, &cache->reqs, 0);
 	xa_lock(&cache->reqs);
-	object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
 	cachefiles_ondemand_set_object_close(object);
 	/*
@@ -222,7 +223,7 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 	load = (void *)req->msg.data;
 	load->fd = fd;
 	req->msg.object_id = object_id;
-	object->ondemand_id = object_id;
+	object->ondemand->ondemand_id = object_id;
 	cachefiles_get_unbind_pincount(cache);
 	trace_cachefiles_ondemand_open(object, &req->msg, load);
@@ -368,7 +369,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	if (opcode != CACHEFILES_OP_OPEN &&
 	    !cachefiles_ondemand_object_is_open(object)) {
-		WARN_ON_ONCE(object->ondemand_id == 0);
+		WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
 		xas_unlock(&xas);
 		ret = -EIO;
 		goto out;
@@ -438,7 +439,7 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
 	if (!cachefiles_ondemand_object_is_open(object))
 		return -ENOENT;
-	req->msg.object_id = object->ondemand_id;
+	req->msg.object_id = object->ondemand->ondemand_id;
 	trace_cachefiles_ondemand_close(object, &req->msg);
 	return 0;
 }
@@ -454,7 +455,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
 	struct cachefiles_object *object = req->object;
 	struct cachefiles_read *load = (void *)req->msg.data;
 	struct cachefiles_read_ctx *read_ctx = private;
-	int object_id = object->ondemand_id;
+	int object_id = object->ondemand->ondemand_id;
 	/* Stop enqueuing requests when daemon has closed anon_fd. */
 	if (!cachefiles_ondemand_object_is_open(object)) {
@@ -500,6 +501,27 @@ void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
 				cachefiles_ondemand_init_close_req, NULL);
 }
+int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
+				      struct cachefiles_volume *volume)
+{
+	if (!cachefiles_in_ondemand_mode(volume->cache))
+		return 0;
+
+	object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
+					GFP_KERNEL);
+	if (!object->ondemand)
+		return -ENOMEM;
+
+	object->ondemand->object = object;
+	return 0;
+}
+
+void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
+{
+	kfree(object->ondemand);
+	object->ondemand = NULL;
+}
+
 int cachefiles_ondemand_read(struct cachefiles_object *object,
 			     loff_t pos, size_t len)
 {
From: Jia Zhu <zhujia.zj@bytedance.com>

mainline inclusion
from mainline-v6.8-rc1
commit 0a7e54c1959c0feb2de23397ec09c7692364313e
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IA6I1T
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
When an anonymous fd is closed by the user daemon and a new read request for the corresponding file comes up, the anonymous fd should be re-opened to handle that read request rather than failing it directly.

1. Introduce a reopening state for objects that are closed but have
   inflight/subsequent read requests.
2. No longer flush READ requests; only flush CLOSE requests when the
   anonymous fd is closed.
3. Enqueue the reopen work on a workqueue, so that the user daemon can get
   out of the daemon_read context and handle the request smoothly.
   Otherwise, the user daemon would send a reopen request and then wait for
   itself to process that request (see the simplified sketch below).
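The core of the failover is the request-selection loop used by cachefiles_ondemand_daemon_read(); a simplified sketch of the logic added below, with locking and the non-READ bookkeeping elided (the helper name select_req is only for illustration):

/* Simplified from cachefiles_ondemand_select_req() in this patch. */
static struct cachefiles_req *select_req(struct xa_state *xas, unsigned long xa_max)
{
	struct cachefiles_req *req;

	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
		if (req->msg.opcode != CACHEFILES_OP_READ)
			return req;	/* OPEN/CLOSE: hand to the daemon as before */
		if (cachefiles_ondemand_object_is_close(req->object)) {
			/* anon_fd already closed: kick off an async reopen instead of failing */
			cachefiles_ondemand_set_object_reopening(req->object);
			queue_work(fscache_wq, &req->object->ondemand->ondemand_work);
			continue;
		}
		if (cachefiles_ondemand_object_is_reopening(req->object))
			continue;	/* hold the READ until the reopen completes */
		return req;
	}
	return NULL;
}

ondemand_object_worker() then simply calls cachefiles_ondemand_init_object() to resend the OPEN request, and the held READ requests are delivered once the copen reply moves the object back to the open state.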
Signed-off-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20231120041422.75170-4-zhujia.zj@bytedance.com
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 fs/cachefiles/internal.h |  3 ++
 fs/cachefiles/ondemand.c | 98 ++++++++++++++++++++++++++++------------
 2 files changed, 72 insertions(+), 29 deletions(-)
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index b0fe76964bc0..b9a90f1a0c01 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -47,9 +47,11 @@ struct cachefiles_volume {
 enum cachefiles_object_state {
 	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
 	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
+	CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
 };
 struct cachefiles_ondemand_info {
+	struct work_struct ondemand_work;
 	int ondemand_id;
 	enum cachefiles_object_state state;
 	struct cachefiles_object *object;
@@ -326,6 +328,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
 CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
 CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
+CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
 #else
 static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 					char __user *_buffer, size_t buflen)
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index deb7e3007aa1..8e130de952f7 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -18,14 +18,10 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
 	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
 	cachefiles_ondemand_set_object_close(object);
-	/*
-	 * Flush all pending READ requests since their completion depends on
-	 * anon_fd.
-	 */
-	xas_for_each(&xas, req, ULONG_MAX) {
+	/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
+	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
 		if (req->msg.object_id == object_id &&
-		    req->msg.opcode == CACHEFILES_OP_READ) {
-			req->error = -EIO;
+		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
 			complete(&req->done);
 			xas_store(&xas, NULL);
 		}
@@ -179,6 +175,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
 	trace_cachefiles_ondemand_copen(req->object, id, size);
 	cachefiles_ondemand_set_object_open(req->object);
+	wake_up_all(&cache->daemon_pollwq);
 out:
 	complete(&req->done);
@@ -222,7 +219,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 	load = (void *)req->msg.data;
 	load->fd = fd;
-	req->msg.object_id = object_id;
 	object->ondemand->ondemand_id = object_id;
 	cachefiles_get_unbind_pincount(cache);
@@ -238,6 +234,43 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 	return ret;
 }
+static void ondemand_object_worker(struct work_struct *work)
+{
+	struct cachefiles_ondemand_info *info =
+		container_of(work, struct cachefiles_ondemand_info, ondemand_work);
+
+	cachefiles_ondemand_init_object(info->object);
+}
+
+/*
+ * If there are any inflight or subsequent READ requests on the
+ * closed object, reopen it.
+ * Skip read requests whose related object is reopening.
+ */
+static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
+							      unsigned long xa_max)
+{
+	struct cachefiles_req *req;
+	struct cachefiles_object *object;
+	struct cachefiles_ondemand_info *info;
+
+	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
+		if (req->msg.opcode != CACHEFILES_OP_READ)
+			return req;
+		object = req->object;
+		info = object->ondemand;
+		if (cachefiles_ondemand_object_is_close(object)) {
+			cachefiles_ondemand_set_object_reopening(object);
+			queue_work(fscache_wq, &info->ondemand_work);
+			continue;
+		}
+		if (cachefiles_ondemand_object_is_reopening(object))
+			continue;
+		return req;
+	}
+	return NULL;
+}
+
 ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 					char __user *_buffer, size_t buflen)
 {
@@ -248,16 +281,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	int ret = 0;
 	XA_STATE(xas, &cache->reqs, cache->req_id_next);
+	xa_lock(&cache->reqs);
 	/*
 	 * Cyclically search for a request that has not ever been processed,
 	 * to prevent requests from being processed repeatedly, and make
 	 * request distribution fair.
 	 */
-	xa_lock(&cache->reqs);
-	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
 	if (!req && cache->req_id_next > 0) {
 		xas_set(&xas, 0);
-		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
 	}
 	if (!req) {
 		xa_unlock(&cache->reqs);
@@ -277,14 +310,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	xa_unlock(&cache->reqs);
 	id = xas.xa_index;
-	msg->msg_id = id;
 	if (msg->opcode == CACHEFILES_OP_OPEN) {
 		ret = cachefiles_ondemand_get_fd(req);
-		if (ret)
+		if (ret) {
+			cachefiles_ondemand_set_object_close(req->object);
 			goto error;
+		}
 	}
+	msg->msg_id = id;
+	msg->object_id = req->object->ondemand->ondemand_id;
+
 	if (copy_to_user(_buffer, msg, n) != 0) {
 		ret = -EFAULT;
 		goto err_put_fd;
@@ -317,19 +354,23 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 					void *private)
 {
 	struct cachefiles_cache *cache = object->volume->cache;
-	struct cachefiles_req *req;
+	struct cachefiles_req *req = NULL;
 	XA_STATE(xas, &cache->reqs, 0);
 	int ret;
 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
 		return 0;
-	if (test_bit(CACHEFILES_DEAD, &cache->flags))
-		return -EIO;
+	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+		ret = -EIO;
+		goto out;
+	}
 	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
-	if (!req)
-		return -ENOMEM;
+	if (!req) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	req->object = object;
 	init_completion(&req->done);
@@ -367,7 +408,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	/* coupled with the barrier in cachefiles_flush_reqs() */
 	smp_mb();
-	if (opcode != CACHEFILES_OP_OPEN &&
+	if (opcode == CACHEFILES_OP_CLOSE &&
 	    !cachefiles_ondemand_object_is_open(object)) {
 		WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
 		xas_unlock(&xas);
@@ -392,7 +433,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	wake_up_all(&cache->daemon_pollwq);
 	wait_for_completion(&req->done);
 	ret = req->error;
+	kfree(req);
+	return ret;
 out:
+	/* Reset the object to close state in error handling path.
+	 * If error occurs after creating the anonymous fd,
+	 * cachefiles_ondemand_fd_release() will set object to close.
+	 */
+	if (opcode == CACHEFILES_OP_OPEN)
+		cachefiles_ondemand_set_object_close(object);
 	kfree(req);
 	return ret;
 }
@@ -439,7 +488,6 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
 	if (!cachefiles_ondemand_object_is_open(object))
 		return -ENOENT;
-	req->msg.object_id = object->ondemand->ondemand_id;
 	trace_cachefiles_ondemand_close(object, &req->msg);
 	return 0;
 }
@@ -455,16 +503,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
 	struct cachefiles_object *object = req->object;
 	struct cachefiles_read *load = (void *)req->msg.data;
 	struct cachefiles_read_ctx *read_ctx = private;
-	int object_id = object->ondemand->ondemand_id;
-
-	/* Stop enqueuing requests when daemon has closed anon_fd. */
-	if (!cachefiles_ondemand_object_is_open(object)) {
-		WARN_ON_ONCE(object_id == 0);
-		pr_info_once("READ: anonymous fd closed prematurely.\n");
-		return -EIO;
-	}
-	req->msg.object_id = object_id;
 	load->off = read_ctx->off;
 	load->len = read_ctx->len;
 	trace_cachefiles_ondemand_read(object, &req->msg, load);
@@ -513,6 +552,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
 		return -ENOMEM;
 	object->ondemand->object = object;
+	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
 	return 0;
 }
From: Jia Zhu <zhujia.zj@bytedance.com>

mainline inclusion
from mainline-v6.8-rc1
commit b817e22b2e91257ace32a6768c3c003faeaa1c5c
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IA6I1T
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Don't trigger EPOLLIN when there are only reopening read requests in the xarray.
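A READ request whose object is still in the REOPENING state is waiting for the kernel-side reopen to finish, so waking the daemon for it would only make the daemon poll and read in a loop with nothing deliverable. The poll handler therefore reports EPOLLIN only if at least one new request is not such a request, roughly (condensed from the hunk below):

	rcu_read_lock();
	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
		if (!cachefiles_ondemand_is_reopening_read(req)) {
			mask |= EPOLLIN;	/* something the daemon can act on now */
			break;
		}
	}
	rcu_read_unlock();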
Suggested-by: Xin Yin <yinxin.x@bytedance.com>
Signed-off-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20231120041422.75170-5-zhujia.zj@bytedance.com
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 fs/cachefiles/daemon.c   | 14 ++++++++++++--
 fs/cachefiles/internal.h | 12 ++++++++++++
 2 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 5f4df9588620..4611fcfb8182 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -355,14 +355,24 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
 					   struct poll_table_struct *poll)
 {
 	struct cachefiles_cache *cache = file->private_data;
+	XA_STATE(xas, &cache->reqs, 0);
+	struct cachefiles_req *req;
 	__poll_t mask;
 	poll_wait(file, &cache->daemon_pollwq, poll);
 	mask = 0;
 	if (cachefiles_in_ondemand_mode(cache)) {
-		if (!xa_empty(&cache->reqs))
-			mask |= EPOLLIN;
+		if (!xa_empty(&cache->reqs)) {
+			rcu_read_lock();
+			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+				if (!cachefiles_ondemand_is_reopening_read(req)) {
+					mask |= EPOLLIN;
+					break;
+				}
+			}
+			rcu_read_unlock();
+		}
 	} else {
 		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
 			mask |= EPOLLIN;
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index b9a90f1a0c01..26e5f8f123ef 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -329,6 +329,13 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
 CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
 CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
 CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
+
+static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
+{
+	return cachefiles_ondemand_object_is_reopening(req->object) &&
+			req->msg.opcode == CACHEFILES_OP_READ;
+}
+
 #else
 static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 					char __user *_buffer, size_t buflen)
@@ -359,6 +366,11 @@ static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *ob
 static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
 {
 }
+
+static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
+{
+	return false;
+}
 #endif
/*
From: Jia Zhu <zhujia.zj@bytedance.com>

mainline inclusion
from mainline-v6.8-rc1
commit e73fa11a356ca0905c3cc648eaacc6f0f2d2c8b3
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IA6I1T
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Previously, in the ondemand read scenario, if the anonymous fd was closed by the user daemon, inflight and subsequent read requests would fail with EIO. As long as the device connection is not released, the user daemon can now hold and restore inflight requests by setting their request flag back to CACHEFILES_REQ_NEW.
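For example, after a crashed user daemon has been restarted and regained the cachefiles device fd (how the fd survives the crash, e.g. via fd passing from a supervisor process, is outside the scope of this patch), it could re-queue the inflight requests roughly as follows; this is a hypothetical userspace sketch, with the helper name chosen only for illustration:

#include <string.h>
#include <unistd.h>

/* Hypothetical recovery step in the user daemon, not part of this patch. */
static int cachefiles_daemon_restore(int devfd)
{
	/* Writing "restore" re-marks every inflight request as
	 * CACHEFILES_REQ_NEW, so subsequent reads of the device
	 * redeliver them to the (restarted) daemon. */
	if (write(devfd, "restore", strlen("restore")) < 0)
		return -1;
	return 0;
}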
Suggested-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Signed-off-by: Jia Zhu <zhujia.zj@bytedance.com>
Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
Link: https://lore.kernel.org/r/20231120041422.75170-6-zhujia.zj@bytedance.com
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 fs/cachefiles/daemon.c   |  1 +
 fs/cachefiles/internal.h |  3 +++
 fs/cachefiles/ondemand.c | 23 +++++++++++++++++++++++
 3 files changed, 27 insertions(+)
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 4611fcfb8182..6465e2574230 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -77,6 +77,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
 	{ "tag",	cachefiles_daemon_tag		},
 #ifdef CONFIG_CACHEFILES_ONDEMAND
 	{ "copen",	cachefiles_ondemand_copen	},
+	{ "restore",	cachefiles_ondemand_restore	},
 #endif
 	{ "",		NULL				}
 };
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 26e5f8f123ef..4a87c9d714a9 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -303,6 +303,9 @@ extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache,
 				     char *args);
+extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache,
+				       char *args);
+
 extern int cachefiles_ondemand_init_object(struct cachefiles_object *object);
 extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 8e130de952f7..b8fbbb1961bb 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -182,6 +182,29 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
 	return ret;
 }
+int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
+{
+	struct cachefiles_req *req;
+
+	XA_STATE(xas, &cache->reqs, 0);
+
+	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+		return -EOPNOTSUPP;
+
+	/*
+	 * Reset the requests to CACHEFILES_REQ_NEW state, so that the
+	 * requests have been processed halfway before the crash of the
+	 * user daemon could be reprocessed after the recovery.
+	 */
+	xas_lock(&xas);
+	xas_for_each(&xas, req, ULONG_MAX)
+		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+	xas_unlock(&xas);
+
+	wake_up_all(&cache->daemon_pollwq);
+	return 0;
+}
+
 static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 {
 	struct cachefiles_object *object;
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/9170 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/B...