From: Peng Wu wupeng58@huawei.com
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8USBA CVE: NA
--------------------------------
Add kernel param reliable_debug to enable separate memory reliable features on demand: - Page cache will not use reliable memory when passing option "P" to reliable_debug in cmdline. - Shmem will not use reliable memory when passing option "S" to reliable_debug in cmdline.
Signed-off-by: Peng Wu wupeng58@huawei.com Signed-off-by: Ma Wupeng mawupeng1@huawei.com --- .../admin-guide/kernel-parameters.txt | 7 ++++ include/linux/mem_reliable.h | 29 +++++++++++++- mm/mem_reliable.c | 40 +++++++++++++++++++ mm/shmem.c | 3 ++ 4 files changed, 77 insertions(+), 2 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 01fc38217459..53b820d51822 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5518,6 +5518,13 @@ [KNL, SMP] Set scheduler's default relax_domain_level. See Documentation/admin-guide/cgroup-v1/cpusets.rst.
+ reliable_debug= [ARM64] + Format: [P][,S] + Only works with CONFIG_MEMORY_RELIABLE and + "kernelcore=reliable" is configured. + P: Page cache does not use the reliable memory. + S: The shmem does not use the reliable memory. + reserve= [KNL,BUGS] Force kernel to ignore I/O ports or memory Format: <base1>,<size1>[,<base2>,<size2>,...] Reserve I/O ports or memory so the kernel won't use diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h index bdd605bf2d45..c47f928cfa59 100644 --- a/include/linux/mem_reliable.h +++ b/include/linux/mem_reliable.h @@ -14,10 +14,13 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);
extern bool reliable_enabled; extern struct file_operations proc_reliable_operations; +extern bool shmem_reliable; +extern bool pagecache_reliable;
void mem_reliable_init(bool has_unmirrored_mem, unsigned long mirrored_sz); bool mem_reliable_status(void); bool mem_reliable_hide_file(const char *name); +void shmem_reliable_init(void);
static inline bool mem_reliable_is_enabled(void) { @@ -46,6 +49,16 @@ static inline bool folio_reliable(struct folio *folio) return folio_zonenum(folio) < ZONE_MOVABLE; }
+static inline bool shmem_reliable_is_enabled(void) +{ + return shmem_reliable; +} + +static inline bool filemap_reliable_is_enabled(void) +{ + return pagecache_reliable; +} + static inline bool skip_non_mirrored_zone(gfp_t gfp, struct zoneref *z) { if (!mem_reliable_is_enabled()) @@ -66,19 +79,30 @@ static inline bool skip_non_mirrored_zone(gfp_t gfp, struct zoneref *z)
static inline void shmem_prepare_alloc(gfp_t *gfp_mask) { - if (mem_reliable_is_enabled()) + if (!mem_reliable_is_enabled()) + return; + + if (shmem_reliable_is_enabled()) *gfp_mask |= GFP_RELIABLE; + else + *gfp_mask &= ~GFP_RELIABLE; }
static inline void filemap_prepare_alloc(gfp_t *gfp_mask) { - if (mem_reliable_is_enabled()) + if (!mem_reliable_is_enabled()) + return; + + if (filemap_reliable_is_enabled()) *gfp_mask |= GFP_RELIABLE; + else + *gfp_mask &= ~GFP_RELIABLE; } #else #define reliable_enabled 0
static inline bool mem_reliable_is_enabled(void) { return false; } +static inline bool filemap_reliable_is_enabled(void) { return false; } static inline void mem_reliable_init(bool has_unmirrored_mem, unsigned long mirrored_sz) {} static inline bool page_reliable(struct page *page) { return false; } @@ -91,6 +115,7 @@ static inline bool mem_reliable_status(void) { return false; } static inline bool mem_reliable_hide_file(const char *name) { return false; } static inline void shmem_prepare_alloc(gfp_t *gfp_mask) {} static inline void filemap_prepare_alloc(gfp_t *gfp_mask) {} +static inline void shmem_reliable_init(void) {} #endif
#endif diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c index 53d11d48555e..c94e62407263 100644 --- a/mm/mem_reliable.c +++ b/mm/mem_reliable.c @@ -13,6 +13,8 @@ DEFINE_STATIC_KEY_FALSE(mem_reliable); EXPORT_SYMBOL_GPL(mem_reliable);
bool reliable_enabled; +bool shmem_reliable __read_mostly = true; +bool pagecache_reliable __read_mostly = true;
bool mem_reliable_status(void) { @@ -63,3 +65,41 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long mirrored_sz)
pr_info("init succeed, mirrored memory size(%lu)\n", mirrored_sz); } + +void shmem_reliable_init(void) +{ + if (!mem_reliable_is_enabled() || !shmem_reliable_is_enabled()) + shmem_reliable = false; +} + +static int __init setup_reliable_debug(char *str) +{ + if (*str++ != '=' || !*str) + /* + * No options specified. + */ + goto out; + + /* + * Determine which debug features should be switched on + */ + for (; *str && *str != ','; str++) { + switch (*str) { + case 'P': + pagecache_reliable = false; + pr_info("disable page cache use reliable memory\n"); + break; + case 'S': + shmem_reliable = false; + pr_info("disable shmem use reliable memory\n"); + break; + default: + pr_err("reliable_debug option '%c' unknown. skipped\n", + *str); + } + } + +out: + return 1; +} +__setup("reliable_debug", setup_reliable_debug); diff --git a/mm/shmem.c b/mm/shmem.c index 71fd12ad473f..33f4a5d36f62 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4627,6 +4627,9 @@ void __init shmem_init(void) else shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ #endif + + shmem_reliable_init(); + return;
out1: