From: Hongbo Li <lihongbo22@huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/release-management/issues/ID5W1P

------------------

In remote mode, undo events are placed on the syncer list. The syncer
object is a stack variable and is released when the process is
interrupted, or when the syncer is woken up while the device's file
descriptor is being closed. When the daemon exits, it calls
`dev_release` and wakes up all events. Once the syncer is marked as
completed, any further access to it is a use-after-free (UAF).
Therefore, the syncer must never be used after its events have been
completed.

Additionally, `mfs_cancel_syncer_events` is called when a user process
terminates. There are scenarios where event completion may coincide
with other cleanup tasks, which can result in invalid event references.
To prevent this, the `xa_lock` is taken to protect against the race
condition.
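
For illustration only, the sketch below models the lifetime rule in
plain userspace C. It is a minimal sketch, not mfs code: pthreads and
C11 atomics stand in for the kernel's completion, atomics and xa_lock,
and every identifier in it is made up. The completing side performs all
of its accesses to the on-stack syncer (result update, list bookkeeping,
final wakeup) under the lock and never touches it afterwards:

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  struct syncer {
  	atomic_int notback;   /* events not yet answered */
  	atomic_int res;       /* first error observed, 0 if none */
  	pthread_mutex_t lock; /* stands in for xa_lock */
  	pthread_cond_t done;  /* stands in for struct completion */
  	int completed;
  };

  /* Completing side: every access under the lock, wakeup last. */
  static void finish_event(struct syncer *s, int err)
  {
  	int zero = 0;

  	pthread_mutex_lock(&s->lock);
  	if (err)
  		atomic_compare_exchange_strong(&s->res, &zero, err);
  	if (atomic_fetch_sub(&s->notback, 1) == 1) {
  		s->completed = 1;
  		pthread_cond_signal(&s->done);
  	}
  	pthread_mutex_unlock(&s->lock);
  	/* The waiter may free *s now; do not touch it again. */
  }

  static void *completer(void *arg)
  {
  	finish_event(arg, 0);
  	return NULL;
  }

  int main(void)
  {
  	struct syncer s = { /* on-stack, like the mfs syncer */
  		.notback = 1,
  		.lock = PTHREAD_MUTEX_INITIALIZER,
  		.done = PTHREAD_COND_INITIALIZER,
  	};
  	pthread_t t;

  	pthread_create(&t, NULL, completer, &s);
  	pthread_mutex_lock(&s.lock);
  	while (!s.completed)
  		pthread_cond_wait(&s.done, &s.lock);
  	pthread_mutex_unlock(&s.lock);
  	pthread_join(t, NULL);
  	printf("res=%d\n", atomic_load(&s.res));
  	return 0; /* s goes out of scope here */
  }

That is the ordering the hunks below enforce: `_ioc_done` updates `res`
and unlinks the event before the final `complete()` and only drops the
xa lock afterwards, and `mfs_cancel_all_events` wakes each syncer while
still holding `xa_lock`.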

Fixes: 3236f3b541d1 ("mfs: Add communication devie and event acquisition operations")
Signed-off-by: Hongbo Li <lihongbo22@huawei.com>
---
 fs/mfs/cache.c | 46 +++++++++++++++++++++++-----------------------
 fs/mfs/dev.c   | 41 +++++++++++++----------------------------
 2 files changed, 36 insertions(+), 51 deletions(-)

diff --git a/fs/mfs/cache.c b/fs/mfs/cache.c
index 2178c78f4f1a..994638065f85 100644
--- a/fs/mfs/cache.c
+++ b/fs/mfs/cache.c
@@ -78,8 +78,6 @@ static long _ioc_done(struct mfs_cache_object *object,
 		return -EINVAL;
 	}
 	xas_store(&xas, NULL);
-	xas_unlock(&xas);
-
 	syncer = event->syncer;
 	if (done->ret)
 		atomic_cmpxchg(&syncer->res, 0, -EIO);
@@ -88,6 +86,8 @@
 	spin_unlock(&syncer->list_lock);
 	if (atomic_dec_return(&syncer->notback) == 0)
 		complete(&syncer->done);
+	xas_unlock(&xas);
+
 	put_mfs_event(event);
 	return 0;
 }
@@ -366,9 +366,7 @@ void mfs_destroy_events(struct super_block *sb)
 		 */
 		pr_warn("Event remains:%lu\n", index);
 		__xa_erase(&caches->events, index);
-		xa_unlock(&caches->events);
 		put_mfs_event(event);
-		xa_lock(&caches->events);
 	}
 	xa_unlock(&caches->events);
 	xa_destroy(&caches->events);
@@ -379,19 +377,18 @@ void mfs_cancel_syncer_events(struct mfs_cache_object *object,
 {
 	struct mfs_sb_info *sbi = MFS_SB(object->mfs_inode->i_sb);
 	struct mfs_caches *caches = &sbi->caches;
-	struct mfs_event *event;
-	struct list_head tmp;
+	struct xarray *xa = &caches->events;
+	struct mfs_event *event, *nevent;
 
-	INIT_LIST_HEAD(&tmp);
+	xa_lock(xa);
 	spin_lock(&syncer->list_lock);
-	list_splice_init(&syncer->head, &tmp);
-	spin_unlock(&syncer->list_lock);
-
-	list_for_each_entry(event, &tmp, link) {
-		xa_erase(&caches->events, event->msg.id);
-		iput(event->object->mfs_inode);
-		kfree(event);
+	list_for_each_entry_safe(event, nevent, &syncer->head, link) {
+		__xa_erase(&caches->events, event->msg.id);
+		list_del(&event->link);
+		put_mfs_event(event);
 	}
+	spin_unlock(&syncer->list_lock);
+	xa_unlock(xa);
 }
 
 void mfs_cancel_all_events(struct mfs_sb_info *sbi)
@@ -405,18 +402,21 @@ void mfs_cancel_all_events(struct mfs_sb_info *sbi)
 	xa_lock(xa);
 	xa_for_each(xa, index, event) {
 		__xa_erase(xa, index);
-		xa_unlock(xa);
-		if (event->syncer) {
-			syncer = event->syncer;
-			if (atomic_dec_return(&syncer->notback) == 0)
-				complete(&syncer->done);
+		syncer = event->syncer;
+		/*
+		 * The syncer is a stack variable, so wake up the syncer
+		 * list under the xa lock to keep it from being freed.
+		 */
+		if (syncer) {
 			spin_lock(&syncer->list_lock);
-			list_del_init(&event->link);
+			list_del(&event->link);
 			spin_unlock(&syncer->list_lock);
+			if (atomic_dec_return(&syncer->notback) == 0) {
+				atomic_cmpxchg(&syncer->res, 0, -EIO);
+				complete(&syncer->done);
+			}
 		}
-		iput(event->object->mfs_inode);
-		kfree(event);
-		xa_lock(xa);
+		put_mfs_event(event);
 	}
 	caches->next_ev = 0;
 	caches->next_msg = 0;
diff --git a/fs/mfs/dev.c b/fs/mfs/dev.c
index 15b55b257d2f..e957be7f02a7 100644
--- a/fs/mfs/dev.c
+++ b/fs/mfs/dev.c
@@ -20,23 +20,6 @@ static const struct class mfs_dev_class = {
 };
 static struct device *mfs_dev;
 
-static inline void mfs_finish_event(struct mfs_event *event, struct xa_state *xas)
-{
-	struct mfs_syncer *syncer = event->syncer;
-
-	if (unlikely(!xas || !event))
-		return;
-
-	if (syncer) {
-		if (xa_cmpxchg(xas->xa, xas->xa_index, event, NULL, 0) != event)
-			return;
-
-		if (atomic_dec_and_test(&syncer->notback))
-			complete(&syncer->done);
-		put_mfs_event(event);
-	}
-}
-
 static int mfs_dev_open(struct inode *inode, struct file *file)
 {
 	struct mfs_caches *caches;
@@ -105,32 +88,34 @@ static ssize_t mfs_dev_read(struct file *file, char __user *buf,
 		xas_unlock(&xas);
 		return 0;
 	}
+	if (event->syncer)
+		get_mfs_event(event);
 	xas_unlock(&xas);
 
 	msg = &event->msg;
 	n = msg->len;
-	if (n > blen)
-		return -EMSGSIZE;
+	if (n > blen) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
 
 	ret = try_hook_fd(event);
 	if (ret < 0)
-		return ret;
+		goto out;
 	msg->fd = ret;
 	ret = 0;
+	if (copy_to_user(buf, msg, n)) {
+		ret = -EFAULT;
+		goto out;
+	}
 
 	xas_lock(&xas);
 	xas_clear_mark(&xas, MFS_EVENT_NEW);
 	caches->next_ev = xas.xa_index + 1;
-	if (event->syncer)
-		get_mfs_event(event);
-	else
+	if (!event->syncer)
 		xas_store(&xas, NULL);
 	xas_unlock(&xas);
-
-	if (copy_to_user(buf, msg, n))
-		ret = -EFAULT;
-	if (ret)
-		mfs_finish_event(event, &xas);
+out:
 	put_mfs_event(event);
 	trace_mfs_dev_read(file, msg->opcode, msg->id, msg->fd);
 	return ret ? ret : n;
-- 
2.25.1