From: Ma Wupeng <mawupeng1@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8USBA
CVE: NA

--------------------------------
Count reliable page cache usage.
Page cache (and anonymous) usage is accounted alongside update_lru_size():
reliable_lru_add() and reliable_lru_add_batch() adjust the new percpu
counters pagecache_reliable_pages and anon_reliable_pages as folios backed
by reliable memory move on and off the LRU lists.
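As a rough sketch only (not part of this patch; the helper below and its
output format are hypothetical), a reporting path could fold the two
counters into a meminfo-style dump:

/*
 * Illustrative sketch, not part of this patch: report the usage
 * accumulated by reliable_lru_add()/reliable_lru_add_batch().
 */
static void reliable_report_usage(struct seq_file *m)
{
	s64 file, anon;

	if (!mem_reliable_counter_initialized())
		return;

	file = percpu_counter_sum_positive(&pagecache_reliable_pages);
	anon = percpu_counter_sum_positive(&anon_reliable_pages);

	seq_printf(m, "ReliableFileCache: %llu kB\n",
		   (unsigned long long)file << (PAGE_SHIFT - 10));
	seq_printf(m, "ReliableTaskUsed:  %llu kB\n",
		   (unsigned long long)(file + anon) << (PAGE_SHIFT - 10));
}

For in-kernel callers the patch already provides task_reliable_used_pages(),
which reads both counters; the seq_file output above is illustrative only.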
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/linux/mem_reliable.h | 20 ++++++++++++++
 include/linux/mm_inline.h    |  6 +++++
 include/linux/mmzone.h       |  5 ++++
 mm/mem_reliable.c            | 52 ++++++++++++++++++++++++++++++++++++
 mm/vmscan.c                  |  2 ++
 5 files changed, 85 insertions(+)
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index c47f928cfa59..7e36ac6b4dd4 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -16,11 +16,16 @@ extern bool reliable_enabled;
 extern struct file_operations proc_reliable_operations;
 extern bool shmem_reliable;
 extern bool pagecache_reliable;
+extern struct percpu_counter pagecache_reliable_pages;
+extern struct percpu_counter anon_reliable_pages;
 
 void mem_reliable_init(bool has_unmirrored_mem, unsigned long mirrored_sz);
 bool mem_reliable_status(void);
 bool mem_reliable_hide_file(const char *name);
 void shmem_reliable_init(void);
+void reliable_lru_add(enum lru_list lru, struct folio *folio, int val);
+void reliable_lru_add_batch(int zid, enum lru_list lru, int val);
+bool mem_reliable_counter_initialized(void);
 
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -98,6 +103,16 @@ static inline void filemap_prepare_alloc(gfp_t *gfp_mask)
 	else
 		*gfp_mask &= ~GFP_RELIABLE;
 }
+
+static inline unsigned long task_reliable_used_pages(void)
+{
+	s64 nr_pages;
+
+	nr_pages = percpu_counter_read_positive(&pagecache_reliable_pages);
+	nr_pages += percpu_counter_read_positive(&anon_reliable_pages);
+
+	return nr_pages;
+}
 #else
 #define reliable_enabled	0
 
@@ -116,6 +131,11 @@ static inline bool mem_reliable_hide_file(const char *name) { return false; }
 static inline void shmem_prepare_alloc(gfp_t *gfp_mask) {}
 static inline void filemap_prepare_alloc(gfp_t *gfp_mask) {}
 static inline void shmem_reliable_init(void) {}
+static inline void reliable_lru_add(enum lru_list lru, struct folio *folio,
+				    int val) {}
+static inline void reliable_lru_add_batch(int zid, enum lru_list lru,
+					  int val) {}
+static inline bool mem_reliable_counter_initialized(void) { return false; }
 #endif
 
 #endif
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8148b30a9df1..57acfa854841 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/swapops.h>
+#include <linux/mem_reliable.h>
 
 /**
  * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
@@ -195,6 +196,7 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
 	if (old_gen < 0) {
 		if (lru_gen_is_active(lruvec, new_gen))
 			lru += LRU_ACTIVE;
+		reliable_lru_add(lru, folio, delta);
 		__update_lru_size(lruvec, lru, zone, delta);
 		return;
 	}
@@ -203,6 +205,7 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
 	if (new_gen < 0) {
 		if (lru_gen_is_active(lruvec, old_gen))
 			lru += LRU_ACTIVE;
+		reliable_lru_add(lru, folio, -delta);
 		__update_lru_size(lruvec, lru, zone, -delta);
 		return;
 	}
@@ -317,6 +320,7 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
 	if (lru_gen_add_folio(lruvec, folio, false))
 		return;
 
+	reliable_lru_add(lru, folio, folio_nr_pages(folio));
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
 			folio_nr_pages(folio));
 	if (lru != LRU_UNEVICTABLE)
@@ -331,6 +335,7 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
 	if (lru_gen_add_folio(lruvec, folio, true))
 		return;
 
+	reliable_lru_add(lru, folio, folio_nr_pages(folio));
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
 			folio_nr_pages(folio));
 	/* This is not expected to be used on LRU_UNEVICTABLE */
@@ -347,6 +352,7 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
 
 	if (lru != LRU_UNEVICTABLE)
 		list_del(&folio->lru);
+	reliable_lru_add(lru, folio, -folio_nr_pages(folio));
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
 			-folio_nr_pages(folio));
 }
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 57086c57b8e4..d055148f47ad 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -286,6 +286,11 @@ static inline bool is_file_lru(enum lru_list lru)
 	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
+static inline int is_anon_lru(enum lru_list lru)
+{
+	return (lru == LRU_INACTIVE_ANON || lru == LRU_ACTIVE_ANON);
+}
+
 static inline bool is_active_lru(enum lru_list lru)
 {
 	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index c94e62407263..be2f3fdfcf1c 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -6,6 +6,8 @@
 #include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 #include <linux/crash_dump.h>
+#include <linux/seq_file.h>
+#include <linux/mmzone.h>
 
 #define PAGES_TO_B(n_pages)	((n_pages) << PAGE_SHIFT)
 
@@ -15,6 +17,14 @@ EXPORT_SYMBOL_GPL(mem_reliable);
 bool reliable_enabled;
 bool shmem_reliable __read_mostly = true;
 bool pagecache_reliable __read_mostly = true;
+struct percpu_counter pagecache_reliable_pages;
+struct percpu_counter anon_reliable_pages;
+
+bool mem_reliable_counter_initialized(void)
+{
+	return likely(percpu_counter_initialized(&pagecache_reliable_pages)) &&
+	       likely((percpu_counter_initialized(&anon_reliable_pages)));
+}
 
 bool mem_reliable_status(void)
 {
@@ -72,6 +82,48 @@ void shmem_reliable_init(void)
 		shmem_reliable = false;
 }
 
+void reliable_lru_add_batch(int zid, enum lru_list lru, int val)
+{
+	if (!mem_reliable_is_enabled())
+		return;
+
+	if (zid < ZONE_MOVABLE) {
+		if (is_file_lru(lru))
+			percpu_counter_add(&pagecache_reliable_pages, val);
+		else if (is_anon_lru(lru))
+			percpu_counter_add(&anon_reliable_pages, val);
+	}
+}
+
+void reliable_lru_add(enum lru_list lru, struct folio *folio, int val)
+{
+	if (!folio_reliable(folio))
+		return;
+
+	if (is_file_lru(lru))
+		percpu_counter_add(&pagecache_reliable_pages, val);
+	else if (is_anon_lru(lru))
+		percpu_counter_add(&anon_reliable_pages, val);
+	else if (lru == LRU_UNEVICTABLE) {
+		if (folio_test_anon(folio))
+			percpu_counter_add(&anon_reliable_pages, val);
+		else
+			percpu_counter_add(&pagecache_reliable_pages, val);
+	}
+}
+
+static int __init reliable_sysctl_init(void)
+{
+	if (!mem_reliable_is_enabled())
+		return 0;
+
+	percpu_counter_init(&pagecache_reliable_pages, 0, GFP_KERNEL);
+	percpu_counter_init(&anon_reliable_pages, 0, GFP_KERNEL);
+
+	return 0;
+}
+arch_initcall(reliable_sysctl_init);
+
 static int __init setup_reliable_debug(char *str)
 {
 	if (*str++ != '=' || !*str)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7a676296af30..127c7870ac7a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2257,6 +2257,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 			continue;
 
 		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
+		reliable_lru_add_batch(zid, lru, -nr_zone_taken[zid]);
 	}
 
 }
@@ -3859,6 +3860,7 @@ static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk
 		if (lru_gen_is_active(lruvec, gen))
 			lru += LRU_ACTIVE;
 		__update_lru_size(lruvec, lru, zone, delta);
+		reliable_lru_add_batch(zone, lru, delta);
 	}
 }
 