From: Dave Chinner dchinner@redhat.com
mainline inclusion from mainline-v6.1-rc4 commit d7b64041164ca177170191d2ad775da074ab2926 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I76JSK CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d7b64041164ca177170191d2ad775da074ab2926
--------------------------------
A recent multithreaded write data corruption has been uncovered in the iomap write code. The core of the problem is partial folio writes can be flushed to disk while a new racing write can map it and fill the rest of the page:
writeback                        new write

allocate blocks
  blocks are unwritten
submit IO
.....
                                 map blocks
                                 iomap indicates UNWRITTEN range
                                 loop {
                                   lock folio
                                   copyin data
.....
IO completes
  runs unwritten extent conv
    blocks are marked written
                                   <iomap now stale>
                                   get next folio
                                 }
Now add memory pressure such that memory reclaim evicts the partially written folio that has already been written to disk.
When the new write finally gets to the last partial page of the new write, it does not find it in cache, so it instantiates a new page, sees the iomap is unwritten, and zeros the part of the page that it does not have data from. This overwrites the data on disk that was originally written.
The full description of the corruption mechanism can be found here:
https://lore.kernel.org/linux-xfs/20220817093627.GZ3600936@dread.disaster.area/
To solve this problem, we need to check whether the iomap is still valid after we lock each folio during the write. We have to do it after we lock the page so that we don't end up with state changes occurring while we wait for the folio to be locked.
Hence we need a mechanism to be able to check that the cached iomap is still valid (similar to what we already do in buffered writeback), and we need a way for ->begin_write to back out and tell the high level iomap iterator that we need to remap the remaining write range.
The iomap needs to grow some storage for the validity cookie that the filesystem provides to travel with the iomap. XFS, in particular, also needs to know some more information about what the iomap maps (attribute extents rather than file data extents) to for the validity cookie to cover all the types of iomaps we might need to validate.
Signed-off-by: Dave Chinner dchinner@redhat.com Reviewed-by: Christoph Hellwig hch@lst.de Reviewed-by: Darrick J. Wong djwong@kernel.org
conflicts: include/linux/iomap.h fs/iomap/buffered-io.c fs/iomap/apply.c
Signed-off-by: Ye Bin yebin10@huawei.com Signed-off-by: Long Li leo.lilong@huawei.com --- fs/iomap/apply.c | 14 +++++++++++--- fs/iomap/buffered-io.c | 27 +++++++++++++++++++++++++++ include/linux/iomap.h | 39 +++++++++++++++++++++++++++++++++------ 3 files changed, 71 insertions(+), 9 deletions(-)
diff --git a/fs/iomap/apply.c b/fs/iomap/apply.c index 26ab6563181f..5595d51c3ca1 100644 --- a/fs/iomap/apply.c +++ b/fs/iomap/apply.c @@ -24,11 +24,16 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, const struct iomap_ops *ops, void *data, iomap_actor_t actor) { - struct iomap iomap = { .type = IOMAP_HOLE }; - struct iomap srcmap = { .type = IOMAP_HOLE }; - loff_t written = 0, ret; + struct iomap iomap; + struct iomap srcmap; + loff_t written, ret; u64 end;
+stale: + memset(&iomap, 0, sizeof(struct iomap)); + memset(&srcmap, 0, sizeof(struct iomap)); + written = 0; + trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);
/* @@ -95,5 +100,8 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, flags, &iomap); }
+ if (!ret && !written && iomap.flags & IOMAP_F_STALE) + goto stale; + return written ? written : ret; } diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 2209ce39511f..243de0dbc219 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -620,6 +620,25 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, goto out_no_page; }
+ /* + * Now we have a locked folio, before we do anything with it we need to + * check that the iomap we have cached is not stale. The inode extent + * mapping can change due to concurrent IO in flight (e.g. + * IOMAP_UNWRITTEN state can change and memory reclaim could have + * reclaimed a previously partially written page at this index after IO + * completion before this write reaches this file offset) and hence we + * could do the wrong thing here (zero a page range incorrectly or fail + * to zero) and corrupt data. + */ + if (page_ops && page_ops->iomap_valid) { + bool iomap_valid = page_ops->iomap_valid(inode, iomap); + if (!iomap_valid) { + iomap->flags |= IOMAP_F_STALE; + status = 0; + goto out_unlock; + } + } + if (srcmap->type == IOMAP_INLINE) iomap_read_inline_data(inode, page, srcmap); else if (iomap->flags & IOMAP_F_BUFFER_HEAD) @@ -786,6 +805,8 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, srcmap); if (unlikely(status)) break; + if (iomap->flags & IOMAP_F_STALE) + break;
if (mapping_writably_mapped(inode->i_mapping)) flush_dcache_page(page); @@ -863,6 +884,8 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data, IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap); if (unlikely(status)) return status; + if (iomap->flags & IOMAP_F_STALE) + break;
status = iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap); @@ -911,6 +934,8 @@ static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length, status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap); if (status) return status; + if (iomap->flags & IOMAP_F_STALE) + return 0;
zero_user(page, offset, bytes); mark_page_accessed(page); @@ -938,6 +963,8 @@ static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos, bytes = iomap_zero(inode, pos, length, iomap, srcmap); if (bytes < 0) return bytes; + if (iomap->flags & IOMAP_F_STALE) + break;
pos += bytes; length -= bytes; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 0c95321f42fd..78520f28806a 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -50,20 +50,29 @@ struct vm_fault; * * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of * buffer heads for this mapping. + * + * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent + * rather than a file data extent. */ -#define IOMAP_F_NEW 0x01 -#define IOMAP_F_DIRTY 0x02 -#define IOMAP_F_SHARED 0x04 -#define IOMAP_F_MERGED 0x08 -#define IOMAP_F_BUFFER_HEAD 0x10 +#define IOMAP_F_NEW (1U << 0) +#define IOMAP_F_DIRTY (1U << 1) +#define IOMAP_F_SHARED (1U << 2) +#define IOMAP_F_MERGED (1U << 3) +#define IOMAP_F_BUFFER_HEAD (1U << 4) +#define IOMAP_F_XATTR (1U << 6)
/* * Flags set by the core iomap code during operations: * * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size * has changed as the result of this write operation. + * + * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file + * range it covers needs to be remapped by the high level before the operation + * can proceed. */ -#define IOMAP_F_SIZE_CHANGED 0x100 +#define IOMAP_F_SIZE_CHANGED (1U << 8) +#define IOMAP_F_STALE (1U << 9)
/* * Flags from 0x1000 up are for file system specific usage: @@ -89,6 +98,7 @@ struct iomap { void *inline_data; void *private; /* filesystem private */ const struct iomap_page_ops *page_ops; + u64 validity_cookie; /* used with .iomap_valid() */ };
static inline sector_t @@ -112,6 +122,23 @@ struct iomap_page_ops { struct iomap *iomap); void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, struct page *page, struct iomap *iomap); + + /* + * Check that the cached iomap still maps correctly to the filesystem's + * internal extent map. FS internal extent maps can change while iomap + * is iterating a cached iomap, so this hook allows iomap to detect that + * the iomap needs to be refreshed during a long running write + * operation. + * + * The filesystem can store internal state (e.g. a sequence number) in + * iomap->validity_cookie when the iomap is first mapped to be able to + * detect changes between mapping time and whenever .iomap_valid() is + * called. + * + * This is called with the folio over the specified file position held + * locked by the iomap code. + */ + bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap); };
/*