From: Chen Wandun <chenwandun@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
--------------------------------
Add statistics for the usage of reliable page cache. The new item "ReliableFileCache" in /proc/meminfo shows how much page cache currently resides in reliable memory.
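With the patch applied, /proc/meminfo reports the reliable page cache alongside the total file cache, for example (values below are illustrative only):

FileCache:          823424 kB
ReliableFileCache:  204800 kB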
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/mem_reliable.h |  6 ++++++
 include/linux/mm_inline.h    |  5 +++++
 mm/mem_reliable.c            | 29 ++++++++++++++++++++++++++++-
 mm/vmscan.c                  |  1 +
 4 files changed, 40 insertions(+), 1 deletion(-)
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index a9d8e6780ec1b..2d017de08a679 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -39,6 +39,9 @@ extern void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
 				       int preferred_nid, nodemask_t *nodemask);
 extern bool mem_reliable_status(void);
 
+extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
+					int val);
+
 static inline bool mem_reliable_is_enabled(void)
 {
 	return static_branch_likely(&mem_reliable);
@@ -169,6 +172,9 @@ static inline bool mem_reliable_status(void) { return false; }
 static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
 static inline bool mem_reliable_watermark_ok(int nr_page) { return true; }
 static inline bool mem_reliable_shmem_limit_check(void) { return true; }
+static inline void page_cache_reliable_lru_add(enum lru_list lru,
+					       struct page *page,
+					       int val) {}
 #endif
 
 #endif
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index b0e3b4473ff2e..704a93c8f4506 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -4,6 +4,7 @@
 
 #include <linux/huge_mm.h>
 #include <linux/swap.h>
+#include <linux/mem_reliable.h>
 
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
@@ -49,6 +50,8 @@ static __always_inline void add_page_to_lru_list(struct page *page,
 {
 	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
 	list_add(&page->lru, &lruvec->lists[lru]);
+	page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
+
 }
 
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
@@ -56,6 +59,7 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
 {
 	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
 	list_add_tail(&page->lru, &lruvec->lists[lru]);
+	page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
 }
 
 static __always_inline void del_page_from_lru_list(struct page *page,
@@ -63,6 +67,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
 {
 	list_del(&page->lru);
 	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+	page_cache_reliable_lru_add(lru, page, -hpage_nr_pages(page));
 }
 
 /**
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 03fb350858ede..17776f387031d 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -36,7 +36,7 @@ long shmem_reliable_nr_page = LONG_MAX;
 
 bool pagecache_use_reliable_mem __read_mostly = true;
 atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
-
+DEFINE_PER_CPU(long, pagecache_reliable_pages);
 bool mem_reliable_status(void)
 {
 	return mem_reliable_is_enabled();
@@ -66,6 +66,25 @@ static bool is_fallback_page(gfp_t gfp, struct page *page)
 	return ret;
 }
 
+static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
+{
+	if (!page || !page_reliable(page))
+		return false;
+
+	if (lru != LRU_ACTIVE_FILE && lru != LRU_INACTIVE_FILE)
+		return false;
+
+	return true;
+}
+
+void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
+{
+	if (!reliable_and_lru_check(lru, page))
+		return;
+
+	this_cpu_add(pagecache_reliable_pages, val);
+}
+
 void page_cache_fallback_inc(gfp_t gfp, struct page *page)
 {
 	long num;
@@ -196,6 +215,7 @@ void reliable_report_meminfo(struct seq_file *m)
 
 		if (pagecache_reliable_is_enabled()) {
 			unsigned long num = 0;
+			int cpu;
 
 			num += global_node_page_state(NR_LRU_BASE +
 						      LRU_ACTIVE_FILE);
@@ -203,6 +223,13 @@
 						      LRU_INACTIVE_FILE);
 			seq_printf(m, "FileCache:        %8lu kB\n",
 				   num << (PAGE_SHIFT - 10));
+
+			num = 0;
+			for_each_possible_cpu(cpu)
+				num += per_cpu(pagecache_reliable_pages, cpu);
+
+			seq_printf(m, "ReliableFileCache:%8lu kB\n",
+				   num << (PAGE_SHIFT - 10));
 		}
 	}
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index efe572fd090c0..15e5864c51050 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4596,6 +4596,7 @@ static int add_page_for_reclaim_swapcache(struct page *page,
 	case 0:
 		list_move(&head->lru, pagelist);
 		update_lru_size(lruvec, lru, page_zonenum(head), -hpage_nr_pages(head));
+		page_cache_reliable_lru_add(lru, head, -hpage_nr_pages(head));
 		break;
 	case -EBUSY:
 		list_move(&head->lru, src);
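For reference, the accounting added above follows the usual per-CPU counter pattern: page_cache_reliable_lru_add() bumps a per-CPU counter with this_cpu_add(), and reliable_report_meminfo() sums all CPUs with for_each_possible_cpu(). A minimal userspace sketch of the same idea, with illustrative names only (not kernel code and not part of this patch):

#include <stdio.h>

#define NR_CPUS 4

/* one counter slot per "CPU", analogous to DEFINE_PER_CPU(long, pagecache_reliable_pages) */
static long reliable_pages[NR_CPUS];

/* analogue of this_cpu_add(pagecache_reliable_pages, val) */
static void counter_add(int cpu, long val)
{
	reliable_pages[cpu] += val;
}

/* analogue of the for_each_possible_cpu() sum in reliable_report_meminfo() */
static long counter_sum(void)
{
	int cpu;
	long sum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += reliable_pages[cpu];
	return sum;
}

int main(void)
{
	counter_add(0, 512);	/* pages added to a file LRU on CPU 0 */
	counter_add(1, 256);	/* pages added on CPU 1 */
	counter_add(1, -128);	/* pages removed from the LRU on CPU 1 */

	printf("ReliableFileCache: %ld pages\n", counter_sum());
	return 0;
}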