From: Chen Wandun <chenwandun@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
--------------------------------
Reliable page cache should be accounted in update_lru_sizes and
move_active_pages_to_lru. update_lru_sizes accounts pages in batch after
they are isolated from the lru. move_active_pages_to_lru adds pages back
to the lru, so the statistics should be updated synchronously there as
well.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/mem_reliable.h |  4 ++++
 mm/mem_reliable.c            | 17 +++++++++++++++--
 mm/vmscan.c                  |  4 ++++
 3 files changed, 23 insertions(+), 2 deletions(-)
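Note for review context: the sketch below illustrates the accounting flow
described above. It is a simplified sketch, not the in-tree code: the
per-CPU counter definition, the include list and the caller
account_isolated_pages() are hypothetical names/declarations used only for
illustration, while the helper body mirrors the mm/mem_reliable.c hunk
below. The caller models update_lru_sizes() passing a negative per-zone
delta once pages have been isolated.

/* Sketch only: simplified declarations, not the in-tree definitions. */
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/mm_inline.h>
#include <linux/mem_reliable.h>

static DEFINE_PER_CPU(long, pagecache_reliable_pages);

/* Batch variant: adjust the reliable page cache counter by 'val' pages. */
void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru, int val)
{
	if (!mem_reliable_is_enabled())
		return;

	if (zid < 0 || zid >= MAX_NR_ZONES)
		return;

	/* Pages below ZONE_MOVABLE are reliable; only the file lru is page cache. */
	if (zid < ZONE_MOVABLE && is_file_lru(lru))
		this_cpu_add(pagecache_reliable_pages, val);
}

/*
 * Caller side, modelled on update_lru_sizes(): pages were just isolated
 * from the lru, so the counter is decremented per zone in one batch.
 */
static void account_isolated_pages(enum lru_list lru,
				   unsigned long *nr_zone_taken)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		if (!nr_zone_taken[zid])
			continue;

		page_cache_reliable_lru_add_batch(zid, lru,
						  -nr_zone_taken[zid]);
	}
}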
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 00c915f583699..83ae566fac5fc 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -40,6 +40,8 @@ extern bool mem_reliable_status(void);
 extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
 					int val);
 extern void page_cache_prepare_alloc(gfp_t *gfp);
+extern void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+					      int val);
 
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -174,6 +176,8 @@ static inline void page_cache_reliable_lru_add(enum lru_list lru,
 						struct page *page, int val) {}
 static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
+static inline void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+						     int val) {}
 #endif
 
 #endif
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index d02159b0182fa..8c664e0add91c 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -64,12 +64,25 @@ static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
 	if (!page_reliable(page))
 		return false;
 
-	if (lru != LRU_ACTIVE_FILE && lru != LRU_INACTIVE_FILE)
+	if (!is_file_lru(lru))
 		return false;
 
 	return true;
 }
 
+void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+				       int val)
+{
+	if (!mem_reliable_is_enabled())
+		return;
+
+	if (zid < 0 || zid >= MAX_NR_ZONES)
+		return;
+
+	if (zid < ZONE_MOVABLE && is_file_lru(lru))
+		this_cpu_add(pagecache_reliable_pages, val);
+}
+
 void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
 {
 	if (!reliable_and_lru_check(lru, page))
@@ -177,7 +190,7 @@ static void show_val_kb(struct seq_file *m, const char *s, unsigned long num)
 void reliable_report_meminfo(struct seq_file *m)
 {
 	bool pagecache_enabled = pagecache_reliable_is_enabled();
-	unsigned long nr_pagecache_pages = 0;
+	long nr_pagecache_pages = 0;
 	long nr_buddy_pages = 0;
 	int cpu;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 15e5864c51050..994c116306aa2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1683,6 +1683,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 #ifdef CONFIG_MEMCG
 		mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 #endif
+		page_cache_reliable_lru_add_batch(zid, lru,
+						  -nr_zone_taken[zid]);
 	}
 
 }
@@ -2099,6 +2101,8 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
 
+		page_cache_reliable_lru_add(lru, page, nr_pages);
+
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);