From: Zhang Yi <yi.zhang@huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8477

--------------------------------

In ext4_alloc_file_blocks(), pagecache_isize_extended() is called under
an active handle and may also hold folio lock if the block size is
smaller than the folio size. This also breaks the "folio lock ->
transaction start" lock ordering for the upcoming iomap buffered I/O
path. Therefore, move pagecache_isize_extended() outside of an active
handle.

Additionally, it is unnecessary to update the file length during each
iteration of the allocation loop. Instead, update the file length only
to the position where the allocation is successful. Postpone updating
the inode size until after the allocation loop completes or is
interrupted due to an error.

Fixes: 5721968224e0 ("ext4: implement zero_range iomap path")
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Yongjian Sun <sunyongjian1@huawei.com>
---
 fs/ext4/extents.c | 50 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 17 deletions(-)

diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c992399b5106..dbc2154f7d4e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4500,7 +4500,7 @@ static int ext4_alloc_file_blocks(struct file *file, loff_t offset, loff_t len,
 	ext4_lblk_t len_lblk;
 	struct ext4_map_blocks map;
 	unsigned int credits;
-	loff_t epos, old_size = i_size_read(inode);
+	loff_t epos = 0, old_size = i_size_read(inode);
 	unsigned int blkbits = inode->i_blkbits;
 
 	BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
@@ -4553,31 +4553,47 @@ static int ext4_alloc_file_blocks(struct file *file, loff_t offset, loff_t len,
 			ext4_journal_stop(handle);
 			break;
 		}
+		ext4_update_inode_fsync_trans(handle, inode, 1);
+		ret = ext4_journal_stop(handle);
+		if (unlikely(ret))
+			break;
+
 		/*
 		 * allow a full retry cycle for any remaining allocations
 		 */
 		retries = 0;
-		map.m_lblk += ret;
-		map.m_len = len_lblk = len_lblk - ret;
+		map.m_lblk += map.m_len;
+		map.m_len = len_lblk = len_lblk - map.m_len;
 		epos = (loff_t)map.m_lblk << inode->i_blkbits;
-		if (new_size) {
-			if (epos > new_size)
-				epos = new_size;
-			ext4_update_inode_size(inode, epos);
-			if (epos > old_size)
-				pagecache_isize_extended(inode, old_size, epos);
-		}
-		ret2 = ext4_mark_inode_dirty(handle, inode);
-		ext4_update_inode_fsync_trans(handle, inode, 1);
-		ret3 = ext4_journal_stop(handle);
-		ret2 = ret3 ? ret3 : ret2;
-		if (unlikely(ret2))
-			break;
 	}
+
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
 
-	return ret > 0 ? ret2 : ret;
+	if (!epos || !new_size)
+		return ret;
+
+	/*
+	 * Allocate blocks, update the file size to match the size of the
+	 * already successfully allocated blocks.
+	 */
+	if (epos > new_size)
+		epos = new_size;
+
+	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
+	if (IS_ERR(handle))
+		return ret ? ret : PTR_ERR(handle);
+
+	ext4_update_inode_size(inode, epos);
+	ret2 = ext4_mark_inode_dirty(handle, inode);
+	ext4_update_inode_fsync_trans(handle, inode, 1);
+	ret3 = ext4_journal_stop(handle);
+	ret2 = ret3 ? ret3 : ret2;
+
+	if (epos > old_size)
+		pagecache_isize_extended(inode, old_size, epos);
+
+	return ret ? ret : ret2;
 }
 
 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
-- 
2.39.2