From: Chen Wandun <chenwandun@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
--------------------------------
Add a cmdline option to control the reliable memory usage of the page cache. The page cache will not use reliable memory when the option "P" is passed to reliable_debug in the cmdline.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 Documentation/admin-guide/kernel-parameters.txt | 3 ++-
 include/linux/mem_reliable.h                    | 8 ++++++++
 include/linux/pagemap.h                         | 4 +++-
 mm/filemap.c                                    | 6 +++++-
 mm/mem_reliable.c                               | 5 +++++
 5 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 8b6f7071072b7..a46b2fe191ba7 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1973,7 +1973,7 @@ some extension. These two features are alternatives.
reliable_debug= [ARM64] - Format: [F][,S] + Format: [F][,S][,P] Only works with CONFIG_MEMORY_RELIABLE and "kernelcore=reliable" is configured. F: User tasks with PF_RELIABLE will not allocate @@ -1981,6 +1981,7 @@ from mirrored region failed. Pagecache and tmpfs will follow this rule too. S: The shmem does not use the reliable memory. + P: Page cache does not use the reliable memory.
kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port. Format: <Controller#>[,poll interval] diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h index 4b51dfc513fc4..0c5f80428e973 100644 --- a/include/linux/mem_reliable.h +++ b/include/linux/mem_reliable.h @@ -20,6 +20,7 @@ extern unsigned long task_reliable_limit __read_mostly; extern bool reliable_allow_fallback; extern bool shmem_reliable; extern struct percpu_counter reliable_shmem_used_nr_page; +extern bool pagecache_use_reliable_mem;
extern void add_reliable_mem_size(long sz); extern void mem_reliable_init(bool has_unmirrored_mem, @@ -85,6 +86,11 @@ static inline bool shmem_reliable_is_enabled(void) return shmem_reliable; }
+static inline bool pagecache_reliable_is_enabled(void) +{ + return pagecache_use_reliable_mem; +} + static inline void shmem_reliable_page_counter(struct page *page, int nr_page) { if (shmem_reliable_is_enabled() && page_reliable(page)) @@ -94,6 +100,7 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page) #else #define reliable_enabled 0 #define reliable_allow_fb_enabled() false +#define pagecache_use_reliable_mem 0
static inline bool mem_reliable_is_enabled(void) { return false; } static inline void add_reliable_mem_size(long sz) {} @@ -126,6 +133,7 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page) { }
+static inline bool pagecache_reliable_is_enabled(void) { return false; } #endif
#endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a6457acd7462e..77563c03618c9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -249,7 +249,9 @@ extern struct page *__page_cache_alloc(gfp_t gfp); #else static inline struct page *__page_cache_alloc(gfp_t gfp) { - gfp |= ___GFP_RELIABILITY; + if (pagecache_reliable_is_enabled()) + gfp |= ___GFP_RELIABILITY; + return alloc_pages(gfp, 0); } #endif diff --git a/mm/filemap.c b/mm/filemap.c index c30e5c1eb77c2..4dc3cc5834a55 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1042,7 +1042,11 @@ struct page *__page_cache_alloc(gfp_t gfp) int n; struct page *page;
- gfp |= ___GFP_RELIABILITY; + if (pagecache_reliable_is_enabled()) + gfp |= ___GFP_RELIABILITY; + else + WARN_ON_ONCE(gfp & ___GFP_RELIABILITY); + if (cpuset_do_page_mem_spread()) { unsigned int cpuset_mems_cookie; do { diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c index 89164bc5728b9..5a32977b674fd 100644 --- a/mm/mem_reliable.c +++ b/mm/mem_reliable.c @@ -21,6 +21,7 @@ bool reliable_allow_fallback __read_mostly = true; bool shmem_reliable __read_mostly = true; struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
+bool pagecache_use_reliable_mem __read_mostly = true; void add_reliable_mem_size(long sz) { atomic_long_add(sz, &total_reliable_mem); @@ -249,6 +250,10 @@ static int __init setup_reliable_debug(char *str) shmem_reliable = false; pr_info("shmem reliable disabled."); break; + case 'P': + pagecache_use_reliable_mem = false; + pr_info("disable page cache use reliable memory\n"); + break; default: pr_err("reliable_debug option '%c' unknown. skipped\n", *str);