From: "Matthew Wilcox (Oracle)" willy@infradead.org
mainline inclusion
from mainline-v6.11-rc1
commit 9aac777aaf9459786bc8463e6cbfc7e7e1abd1f9
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/IAHY3K
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Modelled after the loop in iomap_write_iter(), copy larger chunks from userspace if the filesystem has created large folios.
[hch: use mapping_max_folio_size to keep supporting file systems that do not support large folios]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Tested-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/filemap.c | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)
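[Editor's note, not part of the patch: below is a minimal userspace sketch of
the chunking arithmetic this patch introduces, for readers who want the shape
of the new loop in isolation. Everything in it is a stand-in: "chunk" models
the value returned by mapping_max_folio_size(), sketch_write() models
generic_perform_write(), and printf() replaces the actual copy. It only
illustrates how a write splits on chunk-aligned boundaries and how the chunk
halves back toward PAGE_SIZE after a rejected copy.]

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Stand-in for mapping_max_folio_size(): a power-of-two upper bound. */
static size_t chunk = 4 * PAGE_SIZE;

static void sketch_write(size_t pos, size_t count)
{
	while (count) {
		/* Offset into the current chunk-aligned window. */
		size_t offset = pos & (chunk - 1);
		/* Never copy past the chunk boundary or past the request. */
		size_t bytes = chunk - offset;

		if (bytes > count)
			bytes = count;

		printf("copy %5zu bytes at pos %6zu (chunk %zu)\n",
		       bytes, pos, chunk);
		pos += bytes;
		count -= bytes;
	}
}

int main(void)
{
	/* An unaligned 40000-byte write starting at file offset 3000. */
	sketch_write(3000, 40000);

	/*
	 * On a rejected copy the real loop halves the chunk and retries,
	 * never shrinking it below PAGE_SIZE:
	 */
	if (chunk > PAGE_SIZE)
		chunk /= 2;
	printf("chunk after one fallback: %zu\n", chunk);
	return 0;
}

[With the [hch] fixup above, chunk starts out at PAGE_SIZE on filesystems
that have not opted into large folios, so the split degenerates to the old
one-page-per-iteration behaviour there.]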
diff --git a/mm/filemap.c b/mm/filemap.c
index 9338f805cc4c..eb96ddf00ba8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4038,21 +4038,24 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 	loff_t pos = iocb->ki_pos;
 	struct address_space *mapping = file->f_mapping;
 	const struct address_space_operations *a_ops = mapping->a_ops;
+	size_t chunk = mapping_max_folio_size(mapping);
 	long status = 0;
 	ssize_t written = 0;
 
 	do {
 		struct page *page;
-		unsigned long offset;	/* Offset into pagecache page */
-		unsigned long bytes;	/* Bytes to write to page */
+		struct folio *folio;
+		size_t offset;		/* Offset into folio */
+		size_t bytes;		/* Bytes to write to folio */
 		size_t copied;		/* Bytes copied from user */
 		void *fsdata = NULL;
 
-		offset = (pos & (PAGE_SIZE - 1));
-		bytes = min_t(unsigned long, PAGE_SIZE - offset,
-						iov_iter_count(i));
+		bytes = iov_iter_count(i);
+retry:
+		offset = pos & (chunk - 1);
+		bytes = min(chunk - offset, bytes);
+		balance_dirty_pages_ratelimited(mapping);
 
-again:
 		/*
 		 * Bring in the user page that we will copy from _first_.
 		 * Otherwise there's a nasty deadlock on copying from the
@@ -4074,11 +4077,16 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 		if (unlikely(status < 0))
 			break;
 
+		folio = page_folio(page);
+		offset = offset_in_folio(folio, pos);
+		if (bytes > folio_size(folio) - offset)
+			bytes = folio_size(folio) - offset;
+
 		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 
-		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
-		flush_dcache_page(page);
+		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
+		flush_dcache_folio(folio);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
 						page, fsdata);
@@ -4096,14 +4104,16 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 			 * halfway through, might be a race with munmap,
 			 * might be severe memory pressure.
 			 */
-			if (copied)
+			if (chunk > PAGE_SIZE)
+				chunk /= 2;
+			if (copied) {
 				bytes = copied;
-			goto again;
+				goto retry;
+			}
+		} else {
+			pos += status;
+			written += status;
 		}
-		pos += status;
-		written += status;
-
-		balance_dirty_pages_ratelimited(mapping);
 	} while (iov_iter_count(i));
 
 	if (!written)