From: Chen Wandun <chenwandun@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
--------------------------------
Add the interface /proc/sys/vm/reliable_pagecache_max_bytes to set the
maximum amount of page cache that may be placed in reliable memory; the
limit cannot exceed the total size of reliable memory.
The whole reliable memory feature depends on kernelcore=mirror, which in
turn depends on NUMA, so remove the redundant code from the UMA variant
of __page_cache_alloc().
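
For illustration, a minimal userspace sketch of how the new knob could be
exercised (the 4 GiB value and the error handling here are examples, not
part of this patch):

	#include <stdio.h>

	int main(void)
	{
		const char *knob = "/proc/sys/vm/reliable_pagecache_max_bytes";
		unsigned long long cur;
		FILE *f;

		/* Read the current limit (defaults to ULONG_MAX). */
		f = fopen(knob, "r");
		if (!f || fscanf(f, "%llu", &cur) != 1) {
			perror("read");
			return 1;
		}
		fclose(f);
		printf("current limit: %llu bytes\n", cur);

		/* Try to cap reliable page cache at 4 GiB; a value larger
		 * than the total reliable memory is rejected with EINVAL
		 * and the previous value is kept. */
		f = fopen(knob, "w");
		if (!f) {
			perror("open");
			return 1;
		}
		fprintf(f, "%llu", 4ULL << 30);
		if (fclose(f)) {
			perror("write");
			return 1;
		}
		return 0;
	}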
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/mem_reliable.h |  2 ++
 include/linux/pagemap.h      | 10 +------
 mm/filemap.c                 |  5 +---
 mm/mem_reliable.c            | 53 ++++++++++++++++++++++++++++++++++++
 4 files changed, 57 insertions(+), 13 deletions(-)
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 2d017de08a679..2d3577ce71134 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -41,6 +41,7 @@ extern bool mem_reliable_status(void);
 
 extern void page_cache_reliable_lru_add(enum lru_list lru,
					 struct page *page, int val);
+extern void page_cache_prepare_alloc(gfp_t *gfp);
 
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -175,6 +176,7 @@ static inline bool mem_reliable_shmem_limit_check(void) { return true; }
 static inline void page_cache_reliable_lru_add(enum lru_list lru,
					       struct page *page, int val) {}
+static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
 #endif
 
 #endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a7d83fed0601d..085aed892ce58 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -249,15 +249,7 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-	struct page *page;
-
-	if (pagecache_reliable_is_enabled())
-		gfp |= ___GFP_RELIABILITY;
-
-	page = alloc_pages(gfp, 0);
-	page_cache_fallback_inc(gfp, page);
-
-	return page;
+	return alloc_pages(gfp, 0);
 }
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 2827e2b670e02..2ac6ddf630d80 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1040,10 +1040,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 	int n;
 	struct page *page;
 
-	if (pagecache_reliable_is_enabled())
-		gfp |= ___GFP_RELIABILITY;
-	else
-		WARN_ON_ONCE(gfp & ___GFP_RELIABILITY);
+	page_cache_prepare_alloc(&gfp);
 
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 17776f387031d..dcdd937148b60 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -37,6 +37,10 @@ long shmem_reliable_nr_page = LONG_MAX;
 bool pagecache_use_reliable_mem __read_mostly = true;
 atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
 DEFINE_PER_CPU(long, pagecache_reliable_pages);
+
+static unsigned long zero;
+static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;
+
 bool mem_reliable_status(void)
 {
 	return mem_reliable_is_enabled();
@@ -394,6 +398,23 @@ int reliable_shmem_bytes_limit_handler(struct ctl_table *table, int write,
 }
 #endif
 
+int reliable_pagecache_max_bytes_write(struct ctl_table *table, int write,
+		void __user *buffer, size_t *length, loff_t *ppos)
+{
+	unsigned long old_value = reliable_pagecache_max_bytes;
+	int ret;
+
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (!ret && write) {
+		if (reliable_pagecache_max_bytes > total_reliable_mem_sz()) {
+			reliable_pagecache_max_bytes = old_value;
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
 static struct ctl_table reliable_ctl_table[] = {
 	{
 		.procname = "task_reliable_limit",
@@ -425,6 +446,14 @@ static struct ctl_table reliable_ctl_table[] = {
 		.proc_handler = reliable_shmem_bytes_limit_handler,
 	},
 #endif
+	{
+		.procname = "reliable_pagecache_max_bytes",
+		.data = &reliable_pagecache_max_bytes,
+		.maxlen = sizeof(reliable_pagecache_max_bytes),
+		.mode = 0644,
+		.proc_handler = reliable_pagecache_max_bytes_write,
+		.extra1 = &zero,
+	},
 	{}
 };
@@ -438,6 +467,30 @@ static struct ctl_table reliable_dir_table[] = {
 	{}
 };
 
+void page_cache_prepare_alloc(gfp_t *gfp)
+{
+	long nr_reliable = 0;
+	int cpu;
+
+	if (!mem_reliable_is_enabled())
+		return;
+
+	for_each_possible_cpu(cpu)
+		nr_reliable += per_cpu(pagecache_reliable_pages, cpu);
+
+	if (nr_reliable < 0)
+		goto no_reliable;
+
+	if (nr_reliable > reliable_pagecache_max_bytes >> PAGE_SHIFT)
+		goto no_reliable;
+
+	*gfp |= ___GFP_RELIABILITY;
+	return;
+
+no_reliable:
+	*gfp &= ~___GFP_RELIABILITY;
+}
+
 static int __init reliable_sysctl_init(void)
 {
 	if (!mem_reliable_is_enabled())
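
For reference, the gating performed by page_cache_prepare_alloc() can be
modeled in userspace roughly as below; may_use_reliable() and
MODEL_PAGE_SHIFT are hypothetical names for this sketch (which assumes
4 KiB pages), not kernel interfaces:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MODEL_PAGE_SHIFT 12	/* assume 4 KiB pages */

	static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;

	/* Mirrors the check above: reliable memory may be used for page
	 * cache while the reliable page cache page count stays at or
	 * below the configured byte limit, converted to pages. */
	static bool may_use_reliable(long nr_reliable_pages)
	{
		if (nr_reliable_pages < 0)	/* counter underflow: be safe */
			return false;
		return nr_reliable_pages <=
		       (long)(reliable_pagecache_max_bytes >> MODEL_PAGE_SHIFT);
	}

	int main(void)
	{
		reliable_pagecache_max_bytes = 1UL << 30;	/* 1 GiB cap */
		printf("%d\n", may_use_reliable(1000));		/* 1: under the cap */
		printf("%d\n", may_use_reliable(1L << 20));	/* 0: 4 GiB of pages */
		return 0;
	}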