hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8EXN6
CVE: NA
--------------------------------
The previous two patches fix a corner case hit under out-of-memory
conditions. Add a sysctl interface so that the readahead_early_break
mode can be enabled and disabled at runtime.
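For example, the mode can be toggled at runtime (it defaults to 0,
i.e. disabled, since vm_readahead_early_break is zero-initialized):

    # enable early break of readahead
    sysctl -w vm.readahead_early_break=1
    # equivalent:
    echo 1 > /proc/sys/vm/readahead_early_break
    # disable again
    sysctl -w vm.readahead_early_break=0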
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 Documentation/admin-guide/sysctl/vm.rst | 7 +++++++
 include/linux/pagemap.h                 | 2 ++
 kernel/sysctl.c                         | 9 +++++++++
 mm/filemap.c                            | 2 +-
 mm/readahead.c                          | 5 ++++-
 5 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index b508acfdde2e..0880f769f4dd 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -80,6 +80,7 @@ Currently, these files are in /proc/sys/vm:
 - cache_reclaim_weight
 - cache_reclaim_enable
 - cache_limit_mbytes
+- readahead_early_break
 
 admin_reserve_kbytes
 ====================
@@ -1089,3 +1090,9 @@ cache_limit_mbytes
 This is used to set the upper limit of page cache in megabytes. Page cache
 will be reclaimed periodically if page cache is over limit.
+
+readahead_early_break
+=====================
+
+This is used to break readahead early when the memcg limit is reached or
+there are too many folios that were recently evicted.
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0bfa9cce6589..2568ff96c13a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -817,6 +817,8 @@ struct readahead_control {
 
 #define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
 
+extern int vm_readahead_early_break;
+
 void page_cache_ra_unbounded(struct readahead_control *,
 		unsigned long nr_to_read, unsigned long lookahead_count);
 void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f3f43b2def7f..97dda5113657 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -3335,6 +3335,15 @@ static struct ctl_table vm_table[] = {
 		.extra2		= SYSCTL_ONE,
 	},
 #endif
+	{
+		.procname	= "readahead_early_break",
+		.data		= &vm_readahead_early_break,
+		.maxlen		= sizeof(vm_readahead_early_break),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
 	{ }
 };
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 1cfcb82223fa..dbf5379d74c7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3175,7 +3175,7 @@ void filemap_map_pages(struct vm_fault *vmf,
 		 * Don't decrease mmap_miss in this scenario to make sure
 		 * we can stop read-ahead.
 		 */
-		if (mmap_miss > 0 && !PageWorkingset(page))
+		if (mmap_miss > 0 && !(vm_readahead_early_break && PageWorkingset(page)))
 			mmap_miss--;
 
 		vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
diff --git a/mm/readahead.c b/mm/readahead.c
index 22dd9c8fe808..b2652bc20623 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -27,6 +27,9 @@
 #include "internal.h"
 
 #define READAHEAD_FIRST_SIZE	(2 * 1024 * 1024)
+
+int vm_readahead_early_break;
+
 /*
  * Initialise a struct file's readahead state. Assumes that the caller has
  * memset *ra to zero.
@@ -228,7 +231,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 		if (ret < 0) {
 			put_page(page);
 			read_pages(ractl, &page_pool, true);
-			if (ret == -ENOMEM)
+			if (vm_readahead_early_break && (ret == -ENOMEM))
 				break;
 			continue;
 		}
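
Not part of the patch: the standalone userspace C sketch below illustrates
the control flow page_cache_ra_unbounded() gains when the knob is on.
try_add_page() and the failing index are made-up stand-ins for the kernel's
per-page allocate-and-add step, not kernel APIs. The mm/filemap.c hunk is
analogous in spirit: with the knob on, mmap_miss is left untouched for
workingset pages so read-ahead can still be throttled.

/*
 * Sketch of the early-break behaviour: on a simulated -ENOMEM, the
 * readahead window is abandoned instead of skipping just one page.
 */
#include <errno.h>
#include <stdio.h>

static int vm_readahead_early_break = 1;	/* the new sysctl knob */

/* Made-up stand-in: the third page "fails" as under memcg pressure. */
static int try_add_page(unsigned long index)
{
	return index == 2 ? -ENOMEM : 0;
}

int main(void)
{
	unsigned long i, nr_to_read = 8, submitted = 0;

	for (i = 0; i < nr_to_read; i++) {
		int ret = try_add_page(i);

		if (ret < 0) {
			/*
			 * Old behaviour: skip the failed page and keep
			 * going.  With the knob enabled, -ENOMEM now
			 * aborts the rest of the readahead window.
			 */
			if (vm_readahead_early_break && ret == -ENOMEM)
				break;
			continue;
		}
		submitted++;
	}
	printf("submitted %lu of %lu readahead pages\n", submitted, nr_to_read);
	return 0;
}

With vm_readahead_early_break set to 1 this prints "submitted 2 of 8
readahead pages"; with 0 it prints 7 of 8, matching the old
skip-and-continue behaviour.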