From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4PM01
CVE: NA
--------------------------------
Count reliable shmem usage.
Reliable shmem usage is updated at every place where NR_SHMEM is updated.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/linux/mem_reliable.h | 10 ++++++++++
 mm/filemap.c                 |  9 +++++++--
 mm/mem_reliable.c            |  7 ++++++-
 mm/migrate.c                 |  5 +++++
 mm/shmem.c                   |  4 ++++
 5 files changed, 32 insertions(+), 3 deletions(-)
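Note (not part of this patch): the new counter follows the same percpu_counter
scheme already used for pagecache_reliable_pages and anon_reliable_pages. As a
rough sketch of how the accumulated value could later be read for reporting,
using a hypothetical reliable_shmem_used_pages() helper that this patch does
not introduce, one might write:

	#include <linux/percpu_counter.h>
	#include <linux/mem_reliable.h>

	/*
	 * Illustrative only; reliable_shmem_used_pages() is not added by
	 * this patch. percpu_counter_sum_positive() folds every CPU's
	 * local batch into one total, so it is accurate but costlier than
	 * percpu_counter_read_positive().
	 */
	static unsigned long reliable_shmem_used_pages(void)
	{
		if (!shmem_reliable_is_enabled())
			return 0;

		return percpu_counter_sum_positive(&shmem_reliable_pages);
	}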
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 7e36ac6b4dd4..b2be7010c520 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -18,6 +18,7 @@ extern bool shmem_reliable;
 extern bool pagecache_reliable;
 extern struct percpu_counter pagecache_reliable_pages;
 extern struct percpu_counter anon_reliable_pages;
+extern struct percpu_counter shmem_reliable_pages;
 
 void mem_reliable_init(bool has_unmirrored_mem, unsigned long mirrored_sz);
 bool mem_reliable_status(void);
@@ -113,6 +114,13 @@ static inline unsigned long task_reliable_used_pages(void)
 
 	return nr_pages;
 }
+
+static inline void shmem_reliable_folio_add(struct folio *folio, int nr_page)
+{
+	if (shmem_reliable_is_enabled() && folio_reliable(folio))
+		percpu_counter_add(&shmem_reliable_pages, nr_page);
+}
+
 #else
 #define reliable_enabled 0
@@ -136,6 +144,8 @@ static inline void reliable_lru_add(enum lru_list lru, struct folio *folio,
 static inline void reliable_lru_add_batch(int zid, enum lru_list lru,
 					  int val) {}
 static inline bool mem_reliable_counter_initialized(void) { return false; }
+static inline void shmem_reliable_folio_add(struct folio *folio,
+					    int nr_page) {}
 #endif
 
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index e75743ba4a98..571a5c5cd372 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -185,6 +185,7 @@ static void filemap_unaccount_folio(struct address_space *mapping,
 	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
 	if (folio_test_swapbacked(folio)) {
 		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+		shmem_reliable_folio_add(folio, -nr);
 		if (folio_test_pmd_mappable(folio))
 			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
 	} else if (folio_test_pmd_mappable(folio)) {
@@ -830,10 +831,14 @@ void replace_page_cache_folio(struct folio *old, struct folio *new)
 		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
 	if (!folio_test_hugetlb(new))
 		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
-	if (folio_test_swapbacked(old))
+	if (folio_test_swapbacked(old)) {
 		__lruvec_stat_sub_folio(old, NR_SHMEM);
-	if (folio_test_swapbacked(new))
+		shmem_reliable_folio_add(old, -folio_nr_pages(old));
+	}
+	if (folio_test_swapbacked(new)) {
 		__lruvec_stat_add_folio(new, NR_SHMEM);
+		shmem_reliable_folio_add(new, folio_nr_pages(new));
+	}
 	xas_unlock_irq(&xas);
 	if (free_folio)
 		free_folio(old);
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index be2f3fdfcf1c..9251f82255db 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -19,6 +19,7 @@ bool shmem_reliable __read_mostly = true;
 bool pagecache_reliable __read_mostly = true;
 struct percpu_counter pagecache_reliable_pages;
 struct percpu_counter anon_reliable_pages;
+struct percpu_counter shmem_reliable_pages;
 
 bool mem_reliable_counter_initialized(void)
 {
@@ -78,8 +79,12 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long mirrored_sz)
 
 void shmem_reliable_init(void)
 {
-	if (!mem_reliable_is_enabled() || !shmem_reliable_is_enabled())
+	if (!mem_reliable_is_enabled() || !shmem_reliable_is_enabled()) {
 		shmem_reliable = false;
+		return;
+	}
+
+	percpu_counter_init(&shmem_reliable_pages, 0, GFP_KERNEL);
 }
 
 void reliable_lru_add_batch(int zid, enum lru_list lru, int val)
diff --git a/mm/migrate.c b/mm/migrate.c
index 5aab4994c4b5..322c63e6f9be 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -465,6 +465,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
 
+	if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
+		shmem_reliable_folio_add(folio, -nr);
+		shmem_reliable_folio_add(newfolio, nr);
+	}
+
 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
diff --git a/mm/shmem.c b/mm/shmem.c
index 33f4a5d36f62..0d6807b608ed 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -808,6 +808,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
 		mapping->nrpages += nr;
 		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
 		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
+		shmem_reliable_folio_add(folio, nr);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -839,6 +840,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
 	mapping->nrpages -= nr;
 	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
 	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+	shmem_reliable_folio_add(folio, -nr);
 	xa_unlock_irq(&mapping->i_pages);
 	folio_put(folio);
 	BUG_ON(error);
@@ -1756,8 +1758,10 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 		mem_cgroup_migrate(old, new);
 		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
 		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
+		shmem_reliable_folio_add(new, 1);
 		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
 		__lruvec_stat_mod_folio(old, NR_SHMEM, -1);
+		shmem_reliable_folio_add(old, -1);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);