From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6RKHX
CVE: NA
--------------------------------
Commit ee97b61005df ("mm: Count mirrored pages in buddy system") counts free reliable pages with a per-CPU counter updated alongside the PGALLOC/PGFREE vmstat events in the page allocator. However, the same figure can be obtained by summing NR_FREE_PAGES over all non-movable zones, which is simpler and has better compatibility. Replace the per-CPU counting with that approach.
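In short, the allocator-path bookkeeping removed below is replaced by a walk over the populated non-movable zones. A minimal sketch of the idea, mirroring the free_reliable_pages() helper introduced in mm/mem_reliable.c by this patch:

	/*
	 * Reliable (mirrored) memory sits in the non-movable zones, so
	 * summing NR_FREE_PAGES over every populated zone below
	 * ZONE_MOVABLE yields the number of free reliable pages without
	 * any extra bookkeeping in the allocation/free hot paths.
	 */
	static inline long free_reliable_pages(void)
	{
		struct zone *zone;
		unsigned long cnt = 0;

		for_each_populated_zone(zone)
			if (zone_idx(zone) < ZONE_MOVABLE)
				cnt += zone_page_state(zone, NR_FREE_PAGES);

		return cnt;
	}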
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 include/linux/mem_reliable.h |  7 -------
 mm/mem_reliable.c            | 27 +++++++++++++--------------
 mm/page_alloc.c              |  4 ----
 3 files changed, 13 insertions(+), 25 deletions(-)
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index aa3fe77c8a72..c0eff851bbe7 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -111,12 +111,6 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 	percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
 }
 
-static inline void mem_reliable_buddy_counter(struct page *page, int nr_page)
-{
-	if (page_reliable(page))
-		this_cpu_add(nr_reliable_buddy_pages, nr_page);
-}
-
 static inline bool mem_reliable_shmem_limit_check(void)
 {
 	return percpu_counter_read_positive(&reliable_shmem_used_nr_page) <
@@ -168,7 +162,6 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
-static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
 static inline bool mem_reliable_shmem_limit_check(void) { return true; }
 static inline void reliable_lru_add(enum lru_list lru, struct page *page,
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index f8b8543d7933..524d276db72c 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -28,7 +28,6 @@ unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
-DEFINE_PER_CPU(long, nr_reliable_buddy_pages);
 long shmem_reliable_nr_page = LONG_MAX;
 
 bool pagecache_use_reliable_mem __read_mostly = true;
@@ -176,16 +175,21 @@ static unsigned long total_reliable_mem_sz(void)
 	return atomic_long_read(&total_reliable_mem);
 }
 
-static unsigned long used_reliable_mem_sz(void)
+static inline long free_reliable_pages(void)
 {
-	unsigned long nr_page = 0;
-	struct zone *z;
+	struct zone *zone;
+	unsigned long cnt = 0;
 
-	for_each_populated_zone(z)
-		if (zone_idx(z) < ZONE_MOVABLE)
-			nr_page += zone_page_state(z, NR_FREE_PAGES);
+	for_each_populated_zone(zone)
+		if (zone_idx(zone) < ZONE_MOVABLE)
+			cnt += zone_page_state(zone, NR_FREE_PAGES);
 
-	return total_reliable_mem_sz() - nr_page * PAGE_SIZE;
+	return cnt;
+}
+
+static unsigned long used_reliable_mem_sz(void)
+{
+	return total_reliable_mem_sz() - (free_reliable_pages() << PAGE_SHIFT);
 }
 
 static void show_val_kb(struct seq_file *m, const char *s, unsigned long num)
@@ -198,15 +202,10 @@ void reliable_report_meminfo(struct seq_file *m)
 {
 	s64 nr_pagecache_pages = 0;
 	s64 nr_anon_pages = 0;
-	long nr_buddy_pages = 0;
-	int cpu;
 
 	if (!mem_reliable_is_enabled())
 		return;
 
-	for_each_possible_cpu(cpu)
-		nr_buddy_pages += per_cpu(nr_reliable_buddy_pages, cpu);
-
 	nr_anon_pages = percpu_counter_sum_positive(&anon_reliable_pages);
 	nr_pagecache_pages = percpu_counter_sum_positive(&pagecache_reliable_pages);
 
@@ -215,7 +214,7 @@ void reliable_report_meminfo(struct seq_file *m)
 	show_val_kb(m, "ReliableUsed: ", used_reliable_mem_sz() >> PAGE_SHIFT);
 	show_val_kb(m, "ReliableTaskUsed: ", nr_anon_pages + nr_pagecache_pages);
-	show_val_kb(m, "ReliableBuddyMem: ", nr_buddy_pages);
+	show_val_kb(m, "ReliableBuddyMem: ", free_reliable_pages());
 
 	if (shmem_reliable_is_enabled()) {
 		show_val_kb(m, "ReliableShmem: ",
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e722d73a3724..2084b912efa8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1340,7 +1340,6 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
-	mem_reliable_buddy_counter(page, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype,
 		      fpi_flags);
 	local_irq_restore(flags);
@@ -2920,7 +2919,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 
 	migratetype = get_pcppage_migratetype(page);
 	__count_vm_event(PGFREE);
-	mem_reliable_buddy_counter(page, 1);
 
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -3174,7 +3172,6 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	page = __rmqueue_pcplist(zone, migratetype, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-		mem_reliable_buddy_counter(page, -(1 << order));
 		zone_statistics(preferred_zone, zone);
 	}
 	local_irq_restore(flags);
@@ -3223,7 +3220,6 @@ struct page *rmqueue(struct zone *preferred_zone,
 			get_pcppage_migratetype(page));
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-	mem_reliable_buddy_counter(page, -(1 << order));
 	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);