From: Zhou Guanghui <zhouguanghui1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
------------------------------------------
This feature depends on the overall memory reliable feature. When the shared memory reliable feature is enabled, the pages used by shared memory are allocated from the mirrored region by default. If the mirrored region is insufficient, pages can be allocated from the non-mirrored region instead.
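On the shmem side this amounts to tagging shmem page allocations with the reliable GFP flag; falling back to the non-mirrored region when the mirrored region is exhausted is left to the existing reliable-fallback handling in the page allocator. A rough sketch of the intended allocation order (the helper name and the explicit retry are illustrative only, they are not the hunks below):

	/*
	 * Illustrative sketch, not the code in this patch: the real fallback
	 * happens inside the page allocator when fallback is allowed.
	 */
	static struct page *shmem_alloc_page_reliable(gfp_t gfp, int nid)
	{
		struct page *page;

		if (shmem_reliable_is_enabled())
			gfp |= ___GFP_RELIABILITY;	/* prefer the mirrored region */

		page = alloc_pages_node(nid, gfp, 0);
		if (page || !(gfp & ___GFP_RELIABILITY))
			return page;

		/* mirrored region exhausted: retry from the non-mirrored region */
		return alloc_pages_node(nid, gfp & ~___GFP_RELIABILITY, 0);
	}

When shmem reliable is disabled (reliable_debug=S, or memory reliable as a whole is off), the flag is never added and shmem allocations behave exactly as before.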
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 Documentation/admin-guide/kernel-parameters.txt |  3 ++-
 include/linux/mem_reliable.h                    | 10 ++++++++++
 mm/mem_reliable.c                               | 16 ++++++++++++++++
 mm/shmem.c                                      | 12 ++++++++++++
 4 files changed, 40 insertions(+), 1 deletion(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3fc729aab31a6..8b6f7071072b7 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1973,13 +1973,14 @@ some extension. These two features are alternatives.
 	reliable_debug=	[ARM64]
-			Format: [F]
+			Format: [F][,S]
 			Only works with CONFIG_MEMORY_RELIABLE and
 			"kernelcore=reliable" is configured.
 			F: User tasks with PF_RELIABLE will not allocate
 			memory from non-mirrored region if this allocation
 			from mirrored region failed.
 			Pagecache and tmpfs will follow this rule too.
+			S: The shmem does not use the reliable memory.
 
 	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
 			Format: <Controller#>[,poll interval]
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index c9c4d94a4df46..0641c7a88c786 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -17,10 +17,12 @@ extern bool reliable_enabled;
 extern atomic_long_t reliable_user_used_nr_page;
 extern unsigned long task_reliable_limit __read_mostly;
 extern bool reliable_allow_fallback;
+extern bool shmem_reliable;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
 			      unsigned long *zone_movable_pfn);
+extern void shmem_reliable_init(void);
 extern void reliable_report_meminfo(struct seq_file *m);
 extern bool page_reliable(struct page *page);
 extern void reliable_report_usage(struct seq_file *m, struct mm_struct *mm);
@@ -75,6 +77,12 @@ static inline bool reliable_allow_fb_enabled(void)
 {
 	return reliable_allow_fallback;
 }
+
+static inline bool shmem_reliable_is_enabled(void)
+{
+	return shmem_reliable;
+}
+
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -83,6 +91,7 @@ static inline bool mem_reliable_is_enabled(void) { return false; }
 static inline void add_reliable_mem_size(long sz) {}
 static inline void mem_reliable_init(bool has_unmirrored_mem,
 				     unsigned long *zone_movable_pfn) {}
+static inline void shmem_reliable_init(void) {}
 static inline bool zone_reliable(struct zone *zone) { return false; }
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
@@ -104,6 +113,7 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
 					      unsigned int order,
 					      int preferred_nid,
 					      nodemask_t *nodemask) {}
+static inline bool shmem_reliable_is_enabled(void) { return false; }
#endif
 
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 60a214e3b28f7..32a0270b494d2 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -18,6 +18,7 @@ atomic_long_t reliable_user_used_nr_page;
 /* reliable user limit for user tasks with reliable flag */
 unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
+bool shmem_reliable __read_mostly = true;
 
 void add_reliable_mem_size(long sz)
 {
@@ -88,6 +89,17 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
 		atomic_long_read(&total_reliable_mem));
 }
 
+void shmem_reliable_init(void)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	if (!mem_reliable_is_enabled()) {
+		shmem_reliable = false;
+		pr_info("shmem reliable disabled.\n");
+	}
+}
+
 static unsigned long total_reliable_mem_sz(void)
 {
 	return atomic_long_read(&total_reliable_mem);
@@ -223,6 +235,10 @@ static int __init setup_reliable_debug(char *str)
 			reliable_allow_fallback = false;
 			pr_info("fallback disabled.");
 			break;
+		case 'S':
+			shmem_reliable = false;
+			pr_info("shmem reliable disabled.");
+			break;
 		default:
 			pr_err("reliable_debug option '%c' unknown. skipped\n",
 			       *str);
diff --git a/mm/shmem.c b/mm/shmem.c
index 16bb7806a25e6..e27fc90bab412 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1593,6 +1593,14 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	return page;
 }
 
+static inline void shmem_prepare_alloc(gfp_t *gfp_mask)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	*gfp_mask |= ___GFP_RELIABILITY;
+}
+
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		struct inode *inode,
 		pgoff_t index, bool huge, int node_id)
@@ -1609,6 +1617,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
 
+	shmem_prepare_alloc(&gfp);
+
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index, node_id);
 	else
@@ -3941,6 +3951,8 @@ int __init shmem_init(void)
 	else
 		shmem_huge = 0; /* just in case it was patched */
 #endif
+
+	shmem_reliable_init();
 	return 0;
 
out1: