From: Christoph Hellwig <hch@lst.de>
mainline inclusion
from mainline-v6.9-rc1
commit f525152a0f0ff34eb92b322703d76ba37095f556
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9DN5Z
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f525152a0f0ff34eb92b322703d76ba37095f556
--------------------------------
Split the loop body that calls into the file system to map a block and add it to the ioend into a separate helper to prepare for refactoring of the surrounding code.
Note that this was the only place in iomap_writepage_map that could return an error, so include the call to ->discard_folio into the new helper as that will help to avoid code duplication in the future.
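For reference, the two methods the new helper drives are ->map_blocks and ->discard_folio from struct iomap_writeback_ops. A minimal sketch of a filesystem wiring them up might look like the following (the myfs_* names are hypothetical and not part of this patch; only the signatures follow the iomap_writeback_ops definition):

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	/*
	 * Look up (or allocate) the extent covering @offset and fill in
	 * wpc->iomap.  An error returned here is what now triggers the
	 * ->discard_folio call inside the new helper.
	 */
	return 0;
}

static void myfs_discard_folio(struct folio *folio, loff_t pos)
{
	/*
	 * Undo any reservations for the part of @folio from @pos
	 * onwards that failed to map.
	 */
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
	.discard_folio	= myfs_discard_folio,
};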
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20231207072710.176093-12-hch@lst.de
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
---
 fs/iomap/buffered-io.c | 70 ++++++++++++++++++++++++++----------------
 1 file changed, 43 insertions(+), 27 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ac67152ba2a7..5a4ef75ff5d7 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1709,6 +1709,45 @@ static void iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
 	wbc_account_cgroup_owner(wbc, &folio->page, len);
 }
 
+static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
+		struct writeback_control *wbc, struct folio *folio,
+		struct inode *inode, u64 pos, unsigned *count,
+		struct list_head *submit_list)
+{
+	int error;
+
+	error = wpc->ops->map_blocks(wpc, inode, pos);
+	if (error)
+		goto fail;
+	trace_iomap_writepage_map(inode, &wpc->iomap);
+
+	switch (wpc->iomap.type) {
+	case IOMAP_INLINE:
+		WARN_ON_ONCE(1);
+		error = -EIO;
+		break;
+	case IOMAP_HOLE:
+		break;
+	default:
+		iomap_add_to_ioend(wpc, wbc, folio, inode, pos, submit_list);
+		(*count)++;
+	}
+
+fail:
+	/*
+	 * We cannot cancel the ioend directly here on error. We may have
+	 * already set other pages under writeback and hence we have to run
+	 * I/O completion to mark the error state of the pages under writeback
+	 * appropriately.
+	 *
+	 * Just let the file system know what portion of the folio failed to
+	 * map.
+	 */
+	if (error && wpc->ops->discard_folio)
+		wpc->ops->discard_folio(folio, pos);
+	return error;
+}
+
 /*
  * Check interaction of the folio with the file end.
  *
@@ -1793,7 +1832,8 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	unsigned nblocks = i_blocks_per_folio(inode, folio);
 	u64 pos = folio_pos(folio);
 	u64 end_pos = pos + folio_size(folio);
-	int error = 0, count = 0, i;
+	unsigned count = 0;
+	int error = 0, i;
 	LIST_HEAD(submit_list);
 
 	trace_iomap_writepage(inode, pos, folio_size(folio));
@@ -1819,19 +1859,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
 		if (ifs && !ifs_block_is_dirty(folio, ifs, i))
 			continue;
-
-		error = wpc->ops->map_blocks(wpc, inode, pos);
+		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode, pos,
+				&count, &submit_list);
 		if (error)
 			break;
-		trace_iomap_writepage_map(inode, &wpc->iomap);
-		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) {
-			error = -EIO;
-			break;
-		}
-		if (wpc->iomap.type == IOMAP_HOLE)
-			continue;
-		iomap_add_to_ioend(wpc, wbc, folio, inode, pos, &submit_list);
-		count++;
 	}
 	if (count)
 		wpc->nr_folios++;
@@ -1841,21 +1872,6 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	WARN_ON_ONCE(folio_test_writeback(folio));
 	WARN_ON_ONCE(folio_test_dirty(folio));
 
-	/*
-	 * We cannot cancel the ioend directly here on error. We may have
-	 * already set other pages under writeback and hence we have to run
-	 * I/O completion to mark the error state of the pages under writeback
-	 * appropriately.
-	 */
-	if (unlikely(error)) {
-		/*
-		 * Let the filesystem know what portion of the current page
-		 * failed to map.
-		 */
-		if (wpc->ops->discard_folio)
-			wpc->ops->discard_folio(folio, pos);
-	}
-
 	/*
 	 * We can have dirty bits set past end of file in page_mkwrite path
 	 * while mapping the last partial folio. Hence it's better to clear
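With the hunks above applied, the per-block loop in iomap_writepage_map() reduces to a single helper call per dirty block. Assembled here from the diff for readability (len is the block size computed earlier in the function, unchanged by this patch):

	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (ifs && !ifs_block_is_dirty(folio, ifs, i))
			continue;
		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
				pos, &count, &submit_list);
		if (error)
			break;
	}
	if (count)
		wpc->nr_folios++;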