From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
--------------------------------
Add a per-CPU counter to track mirrored pages in the buddy system: the
counter is increased when reliable pages are freed to the buddy allocator,
decreased when they are allocated from it, and the summed total is reported
as ReliableBuddyMem in /proc/meminfo.
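As a rough illustration of the accounting scheme (a simplified userspace
model only, not the kernel code; NR_CPUS, this_cpu(), buddy_counter() and
report_buddy_pages() below are hypothetical stand-ins for the per-CPU
primitives used in the patch), each CPU keeps its own signed counter,
frees add to it, allocations subtract from it, and the report path sums
all per-CPU values:

```c
/*
 * Minimal sketch of the per-CPU accounting behind ReliableBuddyMem:
 * freeing reliable pages adds to the local CPU's counter, allocating
 * subtracts, and reporting sums over all CPUs. Names here are
 * illustrative stand-ins, not the kernel helpers themselves.
 */
#include <stdio.h>

#define NR_CPUS 4

static long nr_reliable_buddy_pages[NR_CPUS];

/* Stand-in for smp_processor_id(); fixed to CPU 0 in this model. */
static int this_cpu(void) { return 0; }

static void buddy_counter(int reliable, int nr_pages)
{
	if (reliable)				/* page_reliable() in the patch */
		nr_reliable_buddy_pages[this_cpu()] += nr_pages;
}

static long report_buddy_pages(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)	/* for_each_possible_cpu() */
		sum += nr_reliable_buddy_pages[cpu];
	return sum;
}

int main(void)
{
	buddy_counter(1, 1 << 3);	/* free an order-3 reliable block to buddy */
	buddy_counter(1, -(1 << 1));	/* allocate an order-1 block from buddy */
	printf("ReliableBuddyMem: %ld pages\n", report_buddy_pages());
	return 0;
}
```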
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 Documentation/filesystems/proc.txt |  2 ++
 include/linux/mem_reliable.h       |  8 ++++++++
 mm/mem_reliable.c                  | 10 ++++++++++
 mm/page_alloc.c                    |  4 ++++
 4 files changed, 24 insertions(+)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 78c76d24f9f7d..3c0e7e5f78a96 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -884,6 +884,7 @@ ShmemHugePages:        0 kB
 ShmemPmdMapped:        0 kB
 ReliableTotal:   7340032 kB
 ReliableUsed:     418824 kB
+ReliableBuddyMem:  418824 kB
 ReliableShmem:        96 kB
@@ -977,6 +978,7 @@ VmallocChunk: largest contiguous block of vmalloc area which is free
                allocations. This stat excludes the cost of metadata.
 ReliableTotal: Total reliable memory size
 ReliableUsed: The used amount of reliable memory
+ReliableBuddyMem: Total mirrored memory size in buddy system
 ReliableShmem: Reliable memory used by shmem
 ..............................................................................

diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 3fd4364d8ae4b..3e0d2a002aa1f 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -22,6 +22,7 @@ extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
 extern atomic_long_t page_cache_fallback;
+DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
 extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
 extern void add_reliable_mem_size(long sz);
@@ -100,6 +101,12 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 		percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
 }
+static inline void mem_reliable_buddy_counter(struct page *page, int nr_page)
+{
+	if (page && page_reliable(page))
+		this_cpu_add(nr_reliable_buddy_pages, nr_page);
+}
+
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -139,6 +146,7 @@ static inline void page_cache_fallback_inc(gfp_t gfp, struct page *page) {}
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
+static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
 #endif
 #endif
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 2501da40abf4a..5491a1bafe02c 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -28,6 +28,7 @@ unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
+DEFINE_PER_CPU(long, nr_reliable_buddy_pages);
 bool pagecache_use_reliable_mem __read_mostly = true;
 atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
@@ -168,11 +169,20 @@ static unsigned long used_reliable_mem_sz(void)
 void reliable_report_meminfo(struct seq_file *m)
 {
+	long buddy_pages_sum = 0;
+	int cpu;
+
 	if (mem_reliable_is_enabled()) {
+		for_each_possible_cpu(cpu)
+			buddy_pages_sum +=
+				per_cpu(nr_reliable_buddy_pages, cpu);
+
 		seq_printf(m, "ReliableTotal: %8lu kB\n",
 			   total_reliable_mem_sz() >> 10);
 		seq_printf(m, "ReliableUsed: %8lu kB\n",
 			   used_reliable_mem_sz() >> 10);
+		seq_printf(m, "ReliableBuddyMem: %8lu kB\n",
+			   buddy_pages_sum << (PAGE_SHIFT - 10));
 		if (shmem_reliable_is_enabled()) {
 			unsigned long shmem = (unsigned long)percpu_counter_sum(
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 14b4debd998e9..e139605f1dbb4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1340,6 +1340,7 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
+	mem_reliable_buddy_counter(page, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype,
 		      fpi_flags);
 	local_irq_restore(flags);
@@ -2919,6 +2920,7 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 	migratetype = get_pcppage_migratetype(page);
 	__count_vm_event(PGFREE);
+	mem_reliable_buddy_counter(page, 1);
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -3156,6 +3158,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	page = __rmqueue_pcplist(zone, migratetype, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+		mem_reliable_buddy_counter(page, -(1 << order));
 		zone_statistics(preferred_zone, zone);
 	}
 	local_irq_restore(flags);
@@ -3204,6 +3207,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 			get_pcppage_migratetype(page));
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+	mem_reliable_buddy_counter(page, -(1 << order));
 	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);
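
A small usage sketch, separate from the patch itself: once the change is
applied and memory mirroring is enabled, the new ReliableBuddyMem field can
be read back from /proc/meminfo to verify the counter. This is plain
userspace C and assumes only the field name added by the documentation hunk
above.

```c
/* Read ReliableBuddyMem from /proc/meminfo and print it. */
#include <stdio.h>

int main(void)
{
	char line[256];
	unsigned long kb;
	FILE *fp = fopen("/proc/meminfo", "r");

	if (!fp) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, "ReliableBuddyMem: %lu kB", &kb) == 1) {
			printf("mirrored memory in buddy system: %lu kB\n", kb);
			break;
		}
	}

	fclose(fp);
	return 0;
}
```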