From: Zhou Guanghui <zhouguanghui1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
------------------------------------------
Add ReliableShmem to /proc/meminfo to show the amount of reliable memory used by shmem.
- ReliableShmem: reliable memory used by shmem
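
For illustration only, a minimal user-space sketch of reading the new field; it is not part of this patch, and the helper name read_reliable_shmem_kb() is made up for the example:

#include <stdio.h>

/*
 * Hypothetical example helper (not part of this patch): scan
 * /proc/meminfo for the ReliableShmem field and return its value
 * in kB, or -1 if the field is absent or the file cannot be read.
 */
static long read_reliable_shmem_kb(void)
{
	FILE *fp = fopen("/proc/meminfo", "r");
	char line[128];
	long kb = -1;

	if (!fp)
		return -1;

	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, "ReliableShmem: %ld kB", &kb) == 1)
			break;
	}

	fclose(fp);
	return kb;
}

int main(void)
{
	printf("ReliableShmem: %ld kB\n", read_reliable_shmem_kb());
	return 0;
}

Since the kernel-side counter is a percpu_counter, the value shown in /proc/meminfo is a percpu_counter_sum() taken at read time.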
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 Documentation/filesystems/proc.txt |  2 ++
 include/linux/mem_reliable.h       | 11 +++++++++++
 mm/mem_reliable.c                  | 10 ++++++++++
 mm/shmem.c                         |  5 +++++
 4 files changed, 28 insertions(+)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 1ef781f33b376..78c76d24f9f7d 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -884,6 +884,7 @@ ShmemHugePages:        0 kB
 ShmemPmdMapped:        0 kB
 ReliableTotal:   7340032 kB
 ReliableUsed:     418824 kB
+ReliableShmem:        96 kB
 
 MemTotal: Total usable ram (i.e. physical ram minus a few reserved
@@ -976,6 +977,7 @@ VmallocChunk: largest contiguous block of vmalloc area which is free
 	      allocations. This stat excludes the cost of metadata.
 ReliableTotal: Total reliable memory size
 ReliableUsed: The used amount of reliable memory
+ReliableShmem: Reliable memory used by shmem
 
 ..............................................................................
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 0641c7a88c786..4b51dfc513fc4 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -7,6 +7,7 @@
 #include <linux/mmzone.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
+#include <linux/percpu_counter.h>
 
 #ifdef CONFIG_MEMORY_RELIABLE
@@ -18,6 +19,7 @@ extern atomic_long_t reliable_user_used_nr_page;
 extern unsigned long task_reliable_limit __read_mostly;
 extern bool reliable_allow_fallback;
 extern bool shmem_reliable;
+extern struct percpu_counter reliable_shmem_used_nr_page;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
@@ -83,6 +85,12 @@ static inline bool shmem_reliable_is_enabled(void)
 	return shmem_reliable;
 }
 
+static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
+{
+	if (shmem_reliable_is_enabled() && page_reliable(page))
+		percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
+}
+
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -114,6 +122,9 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
 					      int preferred_nid,
 					      nodemask_t *nodemask) {}
 static inline bool shmem_reliable_is_enabled(void) { return false; }
+static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
+{
+}
 
 #endif
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 32a0270b494d2..89164bc5728b9 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -19,6 +19,7 @@ atomic_long_t reliable_user_used_nr_page;
 unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
+struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
 
 void add_reliable_mem_size(long sz)
 {
@@ -97,7 +98,10 @@ void shmem_reliable_init(void)
 	if (!mem_reliable_is_enabled()) {
 		shmem_reliable = false;
 		pr_info("shmem reliable disabled.\n");
+		return;
 	}
+
+	percpu_counter_init(&reliable_shmem_used_nr_page, 0, GFP_KERNEL);
 }
 
 static unsigned long total_reliable_mem_sz(void)
@@ -124,6 +128,12 @@ void reliable_report_meminfo(struct seq_file *m)
 			   total_reliable_mem_sz() >> 10);
 		seq_printf(m, "ReliableUsed: %8lu kB\n",
 			   used_reliable_mem_sz() >> 10);
+
+		if (shmem_reliable_is_enabled()) {
+			unsigned long shmem = (unsigned long)percpu_counter_sum(
+				&reliable_shmem_used_nr_page) << (PAGE_SHIFT - 10);
+			seq_printf(m, "ReliableShmem: %8lu kB\n", shmem);
+		}
 	}
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index e27fc90bab412..aabf0dc626da5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -957,6 +957,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					truncate_inode_page(mapping, page);
 				}
 			}
+			shmem_reliable_page_counter(
+				page, -(1 << compound_order(page)));
 			unlock_page(page);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1067,6 +1069,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 						break;
 					}
 				}
+				shmem_reliable_page_counter(
+					page, -(1 << compound_order(page)));
 				unlock_page(page);
 			}
 			pagevec_remove_exceptionals(&pvec);
@@ -1962,6 +1966,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
+	shmem_reliable_page_counter(page, 1 << compound_order(page));
 	alloced = true;
 
 	if (PageTransHuge(page) &&