From: Jan Kara <jack@suse.cz>
hulk inclusion
category: bugfix
bugzilla: 186975, https://gitee.com/openeuler/kernel/issues/I5HT6F
CVE: NA
Reference: https://patchwork.ozlabs.org/project/linux-ext4/list/?series=309169
--------------------------------
Do not reclaim entries that are currently in use by somebody from the shrinker. Firstly, such entries are likely still useful. Secondly, we will need to keep such entries to protect the pending increment of the xattr block refcount.
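As an illustration (not part of the patch): an mbcache entry that sits on both
the hash list and the LRU list already holds two internal references, so the
shrinker can treat any higher count as "somebody is using this". A minimal
sketch of that test, with the helper name being hypothetical:

	/* Hypothetical helper; the patch open-codes this test. */
	static inline bool mb_entry_in_use(struct mb_cache_entry *entry)
	{
		/* 2 refs = hash list + LRU list; more means active users */
		return atomic_read(&entry->e_refcnt) > 2;
	}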
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 fs/mbcache.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 081ccf0caee3..12203e5a91e7 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -287,7 +287,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
 		entry = list_first_entry(&cache->c_list,
 					 struct mb_cache_entry, e_list);
-		if (entry->e_referenced) {
+		if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
 			entry->e_referenced = 0;
 			list_move_tail(&entry->e_list, &cache->c_list);
 			continue;
@@ -301,6 +301,14 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 		spin_unlock(&cache->c_list_lock);
 		head = mb_cache_entry_head(cache, entry->e_key);
 		hlist_bl_lock(head);
+		/* Now a reliable check if the entry didn't get used... */
+		if (atomic_read(&entry->e_refcnt) > 2) {
+			hlist_bl_unlock(head);
+			spin_lock(&cache->c_list_lock);
+			list_add_tail(&entry->e_list, &cache->c_list);
+			cache->c_entry_count++;
+			continue;
+		}
 		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
 			hlist_bl_del_init(&entry->e_hash_list);
 			atomic_dec(&entry->e_refcnt);
From: Jan Kara <jack@suse.cz>
hulk inclusion
category: bugfix
bugzilla: 186975, https://gitee.com/openeuler/kernel/issues/I5HT6F
CVE: NA
Reference: https://patchwork.ozlabs.org/project/linux-ext4/list/?series=309169
--------------------------------
Add a function, mb_cache_entry_delete_or_get(), to delete an mbcache entry if it is unused, and a second function, mb_cache_entry_wait_unused(), to wait for an entry to become unused. We do not share code between the two deleting functions, as one of them will go away soon.
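For illustration, this is the calling pattern the pair of functions is
designed for (a sketch only; later patches in this series use exactly this
loop in ext4):

	/* Delete the entry, or wait out any users and retry. */
	while ((entry = mb_cache_entry_delete_or_get(cache, key, value))) {
		mb_cache_entry_wait_unused(entry);  /* sleep until we are the last user */
		mb_cache_entry_put(cache, entry);   /* drop our ref, then retry the delete */
	}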
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 fs/mbcache.c            | 66 +++++++++++++++++++++++++++++++++++++++--
 include/linux/mbcache.h | 10 ++++++-
 2 files changed, 73 insertions(+), 3 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 12203e5a91e7..c76030e20d92 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -10,7 +10,7 @@
 /*
  * Mbcache is a simple key-value store. Keys need not be unique, however
  * key-value pairs are expected to be unique (we use this fact in
- * mb_cache_entry_delete()).
+ * mb_cache_entry_delete_or_get()).
  *
  * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
  * Ext4 also uses it for deduplication of xattr values stored in inodes.
@@ -124,6 +124,19 @@ void __mb_cache_entry_free(struct mb_cache_entry *entry)
 }
 EXPORT_SYMBOL(__mb_cache_entry_free);
 
+/*
+ * mb_cache_entry_wait_unused - wait to be the last user of the entry
+ *
+ * @entry - entry to work on
+ *
+ * Wait to be the last user of the entry.
+ */
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
+{
+	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 3);
+}
+EXPORT_SYMBOL(mb_cache_entry_wait_unused);
+
 static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
 					   struct mb_cache_entry *entry,
 					   u32 key)
@@ -216,7 +229,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
 }
 EXPORT_SYMBOL(mb_cache_entry_get);
 
-/* mb_cache_entry_delete - remove a cache entry
+/* mb_cache_entry_delete - try to remove a cache entry
  * @cache - cache we work with
  * @key - key
  * @value - value
@@ -253,6 +266,55 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
 }
 EXPORT_SYMBOL(mb_cache_entry_delete);
 
+/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
+ * @cache - cache we work with
+ * @key - key
+ * @value - value
+ *
+ * Remove entry from cache @cache with key @key and value @value. The removal
+ * happens only if the entry is unused. The function returns NULL in case the
+ * entry was successfully removed or there's no entry in cache. Otherwise the
+ * function grabs reference of the entry that we failed to delete because it
+ * still has users and return it.
+ */
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+						    u32 key, u64 value)
+{
+	struct hlist_bl_node *node;
+	struct hlist_bl_head *head;
+	struct mb_cache_entry *entry;
+
+	head = mb_cache_entry_head(cache, key);
+	hlist_bl_lock(head);
+	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
+		if (entry->e_key == key && entry->e_value == value) {
+			if (atomic_read(&entry->e_refcnt) > 2) {
+				atomic_inc(&entry->e_refcnt);
+				hlist_bl_unlock(head);
+				return entry;
+			}
+			/* We keep hash list reference to keep entry alive */
+			hlist_bl_del_init(&entry->e_hash_list);
+			hlist_bl_unlock(head);
+			spin_lock(&cache->c_list_lock);
+			if (!list_empty(&entry->e_list)) {
+				list_del_init(&entry->e_list);
+				if (!WARN_ONCE(cache->c_entry_count == 0,
+		"mbcache: attempt to decrement c_entry_count past zero"))
+					cache->c_entry_count--;
+				atomic_dec(&entry->e_refcnt);
+			}
+			spin_unlock(&cache->c_list_lock);
+			mb_cache_entry_put(cache, entry);
+			return NULL;
+		}
+	}
+	hlist_bl_unlock(head);
+
+	return NULL;
+}
+EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
+
 /* mb_cache_entry_touch - cache entry got used
  * @cache - cache the entry belongs to
  * @entry - entry that got used
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 20f1e3ff6013..8eca7f25c432 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -30,15 +30,23 @@ void mb_cache_destroy(struct mb_cache *cache);
 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 			  u64 value, bool reusable);
 void __mb_cache_entry_free(struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
 static inline int mb_cache_entry_put(struct mb_cache *cache,
 				     struct mb_cache_entry *entry)
 {
-	if (!atomic_dec_and_test(&entry->e_refcnt))
+	unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+	if (cnt > 0) {
+		if (cnt <= 3)
+			wake_up_var(&entry->e_refcnt);
 		return 0;
+	}
 	__mb_cache_entry_free(entry);
 	return 1;
 }
 
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+						    u32 key, u64 value);
 void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
 					  u64 value);
From: Jan Kara <jack@suse.cz>
hulk inclusion
category: bugfix
bugzilla: 186975, https://gitee.com/openeuler/kernel/issues/I5HT6F
CVE: NA
Reference: https://patchwork.ozlabs.org/project/linux-ext4/list/?series=309169
--------------------------------
Currently we remove the EA inode from the mbcache as soon as its xattr refcount drops to zero. However, there can be pending attempts to reuse the inode, so the refcount handling code has to cope with the refcount increasing from zero anyway. So save some work and just keep the EA inode in the mbcache until it is evicted. At that moment we are sure any following iget() of the EA inode will fail anyway (or will wait for the eviction to finish and load things from the disk again), so removing the mbcache entry at that moment is fine and simplifies the code a bit.
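Conceptually, the removal moves from the refcount-update path to the
eviction path (a sketch only; the exact hunks follow below):

	/* In ext4_evict_inode(): EA inodes now drop their mbcache entry here. */
	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);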
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 fs/ext4/inode.c |  2 ++
 fs/ext4/xattr.c | 24 ++++++++----------------
 fs/ext4/xattr.h |  1 +
 3 files changed, 11 insertions(+), 16 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5d198b0171ab..9a4125a5da57 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -181,6 +181,8 @@ void ext4_evict_inode(struct inode *inode)
 
 	trace_ext4_evict_inode(inode);
 
+	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
+		ext4_evict_ea_inode(inode);
 	if (inode->i_nlink) {
 		/*
 		 * When journalling data dirty buffers are tracked only in the
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index ffaf31584c0e..c6d7fc8d24ce 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -434,6 +434,14 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
 	return err;
 }
 
+/* Remove entry from mbcache when EA inode is getting evicted */
+void ext4_evict_ea_inode(struct inode *inode)
+{
+	if (EA_INODE_CACHE(inode))
+		mb_cache_entry_delete(EA_INODE_CACHE(inode),
+			ext4_xattr_inode_get_hash(inode), inode->i_ino);
+}
+
 static int
 ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
 			       struct ext4_xattr_entry *entry, void *buffer,
@@ -973,10 +981,8 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
 static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 				       int ref_change)
 {
-	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
 	struct ext4_iloc iloc;
 	s64 ref_count;
-	u32 hash;
 	int ret;
 
 	inode_lock(ea_inode);
@@ -1001,14 +1007,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 
 			set_nlink(ea_inode, 1);
 			ext4_orphan_del(handle, ea_inode);
-
-			if (ea_inode_cache) {
-				hash = ext4_xattr_inode_get_hash(ea_inode);
-				mb_cache_entry_create(ea_inode_cache,
-						      GFP_NOFS, hash,
-						      ea_inode->i_ino,
-						      true /* reusable */);
-			}
 		}
 	} else {
 		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
@@ -1021,12 +1019,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 
 			clear_nlink(ea_inode);
 			ext4_orphan_add(handle, ea_inode);
-
-			if (ea_inode_cache) {
-				hash = ext4_xattr_inode_get_hash(ea_inode);
-				mb_cache_entry_delete(ea_inode_cache, hash,
-						      ea_inode->i_ino);
-			}
 		}
 	}
 
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 990084e00374..231ef308d10c 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -190,6 +190,7 @@ extern void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *array);
 
 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
 				      struct ext4_inode *raw_inode, handle_t *handle);
+extern void ext4_evict_ea_inode(struct inode *inode);
 
 extern const struct xattr_handler *ext4_xattr_handlers[];
From: Jan Kara <jack@suse.cz>
hulk inclusion
category: bugfix
bugzilla: 186975, https://gitee.com/openeuler/kernel/issues/I5HT6F
CVE: NA
Reference: https://patchwork.ozlabs.org/project/linux-ext4/list/?series=309169
--------------------------------
Remove an unnecessary else (and thus one indentation level) from a code block in ext4_xattr_block_set(). This will also make the following code changes easier. No functional changes.
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Conflicts:
	fs/ext4/xattr.c
[ 188c299e2a26 ("ext4: Support for checksumming from journal triggers") is
  not applied. ]
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 fs/ext4/xattr.c | 79 ++++++++++++++++++++++++-------------------------
 1 file changed, 39 insertions(+), 40 deletions(-)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c6d7fc8d24ce..3ec4833d93df 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1847,6 +1847,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 #define header(x) ((struct ext4_xattr_header *)(x))
 
 	if (s->base) {
+		int offset = (char *)s->here - bs->bh->b_data;
+
 		BUFFER_TRACE(bs->bh, "get_write_access");
 		error = ext4_journal_get_write_access(handle, bs->bh);
 		if (error)
@@ -1878,50 +1880,47 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			if (error)
 				goto cleanup;
 			goto inserted;
-		} else {
-			int offset = (char *)s->here - bs->bh->b_data;
+		}
+		unlock_buffer(bs->bh);
+		ea_bdebug(bs->bh, "cloning");
+		s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
+		error = -ENOMEM;
+		if (s->base == NULL)
+			goto cleanup;
+		memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
+		s->first = ENTRY(header(s->base)+1);
+		header(s->base)->h_refcount = cpu_to_le32(1);
+		s->here = ENTRY(s->base + offset);
+		s->end = s->base + bs->bh->b_size;
 
-			unlock_buffer(bs->bh);
-			ea_bdebug(bs->bh, "cloning");
-			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
-			error = -ENOMEM;
-			if (s->base == NULL)
+		/*
+		 * If existing entry points to an xattr inode, we need
+		 * to prevent ext4_xattr_set_entry() from decrementing
+		 * ref count on it because the reference belongs to the
+		 * original block. In this case, make the entry look
+		 * like it has an empty value.
+		 */
+		if (!s->not_found && s->here->e_value_inum) {
+			ea_ino = le32_to_cpu(s->here->e_value_inum);
+			error = ext4_xattr_inode_iget(inode, ea_ino,
+				      le32_to_cpu(s->here->e_hash),
+				      &tmp_inode);
+			if (error)
 				goto cleanup;
-			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
-			s->first = ENTRY(header(s->base)+1);
-			header(s->base)->h_refcount = cpu_to_le32(1);
-			s->here = ENTRY(s->base + offset);
-			s->end = s->base + bs->bh->b_size;
 
-			/*
-			 * If existing entry points to an xattr inode, we need
-			 * to prevent ext4_xattr_set_entry() from decrementing
-			 * ref count on it because the reference belongs to the
-			 * original block. In this case, make the entry look
-			 * like it has an empty value.
-			 */
-			if (!s->not_found && s->here->e_value_inum) {
-				ea_ino = le32_to_cpu(s->here->e_value_inum);
-				error = ext4_xattr_inode_iget(inode, ea_ino,
-					      le32_to_cpu(s->here->e_hash),
-					      &tmp_inode);
-				if (error)
-					goto cleanup;
-
-				if (!ext4_test_inode_state(tmp_inode,
-					 EXT4_STATE_LUSTRE_EA_INODE)) {
-					/*
-					 * Defer quota free call for previous
-					 * inode until success is guaranteed.
-					 */
-					old_ea_inode_quota = le32_to_cpu(
-							s->here->e_value_size);
-				}
-				iput(tmp_inode);
-
-				s->here->e_value_inum = 0;
-				s->here->e_value_size = 0;
+			if (!ext4_test_inode_state(tmp_inode,
+				 EXT4_STATE_LUSTRE_EA_INODE)) {
+				/*
+				 * Defer quota free call for previous
+				 * inode until success is guaranteed.
+				 */
+				old_ea_inode_quota = le32_to_cpu(
+						s->here->e_value_size);
 			}
+			iput(tmp_inode);
+
+			s->here->e_value_inum = 0;
+			s->here->e_value_size = 0;
 		}
 	} else {
 		/* Allocate a buffer where we construct the new block. */
From: Jan Kara <jack@suse.cz>
hulk inclusion
category: bugfix
bugzilla: 186975, https://gitee.com/openeuler/kernel/issues/I5HT6F
CVE: NA
Reference: https://patchwork.ozlabs.org/project/linux-ext4/list/?series=309169
--------------------------------
When ext4_xattr_block_set() decides to remove an xattr block, the following race can happen:
CPU1                                    CPU2

ext4_xattr_block_set()                  ext4_xattr_release_block()
  new_bh = ext4_xattr_block_cache_find()

                                          lock_buffer(bh);
                                          ref = le32_to_cpu(BHDR(bh)->h_refcount);
                                          if (ref == 1) {
                                                  ...
                                                  mb_cache_entry_delete();
                                                  unlock_buffer(bh);
                                                  ext4_free_blocks();
                                                    ...
                                                    ext4_forget(..., bh, ...);
                                                      jbd2_journal_revoke(..., bh);

  ext4_journal_get_write_access(..., new_bh, ...)
    do_get_write_access()
      jbd2_journal_cancel_revoke(..., new_bh);
Later the code in ext4_xattr_block_set() finds out that the block got freed and cancels reuse of the block, but the revoke stays canceled, so in case of block reuse and journal replay the filesystem can get corrupted. If the race works out slightly differently, we can also hit assertions in the jbd2 code.
Fix the problem by making sure that once a matching mbcache entry is found, the code dropping the last xattr block reference (or trying to modify the xattr block in place) waits until the mbcache entry reference is dropped. This way, code trying to reuse the xattr block is protected from someone trying to drop its last reference.
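In other words, both paths that drop or rewrite the xattr block now go
through mb_cache_entry_delete_or_get() first (a sketch of the release path
only; see the hunks below for the exact code):

	oe = mb_cache_entry_delete_or_get(ea_block_cache, hash, bh->b_blocknr);
	if (oe) {
		/* Block is being reused; wait for the reuser, then retry. */
		unlock_buffer(bh);
		mb_cache_entry_wait_unused(oe);
		mb_cache_entry_put(ea_block_cache, oe);
		goto retry_ref;
	}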
Reported-and-tested-by: Ritesh Harjani <ritesh.list@gmail.com>
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 fs/ext4/xattr.c | 67 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 45 insertions(+), 22 deletions(-)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 3ec4833d93df..8bc3f5cc11e3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -437,9 +437,16 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
 /* Remove entry from mbcache when EA inode is getting evicted */
 void ext4_evict_ea_inode(struct inode *inode)
 {
-	if (EA_INODE_CACHE(inode))
-		mb_cache_entry_delete(EA_INODE_CACHE(inode),
-			ext4_xattr_inode_get_hash(inode), inode->i_ino);
+	struct mb_cache_entry *oe;
+
+	if (!EA_INODE_CACHE(inode))
+		return;
+	/* Wait for entry to get unused so that we can remove it */
+	while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
+			ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
+		mb_cache_entry_wait_unused(oe);
+		mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
+	}
 }
 
 static int
@@ -1228,6 +1235,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 	if (error)
 		goto out;
 
+retry_ref:
 	lock_buffer(bh);
 	hash = le32_to_cpu(BHDR(bh)->h_hash);
 	ref = le32_to_cpu(BHDR(bh)->h_refcount);
@@ -1237,9 +1245,18 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		 * This must happen under buffer lock for
 		 * ext4_xattr_block_set() to reliably detect freed block
 		 */
-		if (ea_block_cache)
-			mb_cache_entry_delete(ea_block_cache, hash,
-					      bh->b_blocknr);
+		if (ea_block_cache) {
+			struct mb_cache_entry *oe;
+
+			oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+							  bh->b_blocknr);
+			if (oe) {
+				unlock_buffer(bh);
+				mb_cache_entry_wait_unused(oe);
+				mb_cache_entry_put(ea_block_cache, oe);
+				goto retry_ref;
+			}
+		}
 		get_bh(bh);
 		unlock_buffer(bh);
@@ -1863,9 +1880,20 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			 * ext4_xattr_block_set() to reliably detect modified
 			 * block
 			 */
-			if (ea_block_cache)
-				mb_cache_entry_delete(ea_block_cache, hash,
-						      bs->bh->b_blocknr);
+			if (ea_block_cache) {
+				struct mb_cache_entry *oe;
+
+				oe = mb_cache_entry_delete_or_get(ea_block_cache,
+					hash, bs->bh->b_blocknr);
+				if (oe) {
+					/*
+					 * Xattr block is getting reused. Leave
+					 * it alone.
+					 */
+					mb_cache_entry_put(ea_block_cache, oe);
+					goto clone_block;
+				}
+			}
 			ea_bdebug(bs->bh, "modifying in-place");
 			error = ext4_xattr_set_entry(i, s, handle, inode,
 						     true /* is_block */);
@@ -1881,6 +1909,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			goto cleanup;
 		goto inserted;
 	}
+clone_block:
 	unlock_buffer(bs->bh);
 	ea_bdebug(bs->bh, "cloning");
 	s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
@@ -1987,18 +2016,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			lock_buffer(new_bh);
 			/*
 			 * We have to be careful about races with
-			 * freeing, rehashing or adding references to
-			 * xattr block. Once we hold buffer lock xattr
-			 * block's state is stable so we can check
-			 * whether the block got freed / rehashed or
-			 * not. Since we unhash mbcache entry under
-			 * buffer lock when freeing / rehashing xattr
-			 * block, checking whether entry is still
-			 * hashed is reliable. Same rules hold for
-			 * e_reusable handling.
+			 * adding references to xattr block. Once we
+			 * hold buffer lock xattr block's state is
+			 * stable so we can check the additional
+			 * reference fits.
 			 */
-			if (hlist_bl_unhashed(&ce->e_hash_list) ||
-			    !ce->e_reusable) {
+			ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+			if (ref > EXT4_XATTR_REFCOUNT_MAX) {
 				/*
 				 * Undo everything and check mbcache
 				 * again.
@@ -2013,9 +2037,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 				new_bh = NULL;
 				goto inserted;
 			}
-			ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
 			BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
-			if (ref >= EXT4_XATTR_REFCOUNT_MAX)
+			if (ref == EXT4_XATTR_REFCOUNT_MAX)
 				ce->e_reusable = 0;
 			ea_bdebug(new_bh, "reusing; refcount now=%d",
 				  ref);