From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
--------------------------------
Count reliable memory info based on zone info. Any populated zone below
ZONE_MOVABLE is treated as a reliable zone, and the pages managed by
those zones are summed to get the total.
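
In sketch form, the new accounting amounts to the loop below (a
simplified restatement of the code added by this patch, with the local
variable renamed for brevity):

    /*
     * Zones below ZONE_MOVABLE contain only mirrored memory, so
     * summing their managed pages yields ReliableTotal. The same
     * zones' NR_FREE_PAGES gives ReliableBuddyMem, and the
     * difference between the two is reported as ReliableUsed.
     */
    static unsigned long total_reliable_pages(void)
    {
            unsigned long pages = 0;
            struct zone *z;

            for_each_populated_zone(z)
                    if (zone_idx(z) < ZONE_MOVABLE)
                            pages += zone_managed_pages(z);

            return pages;
    }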
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 Documentation/filesystems/proc.rst |  3 ++
 include/linux/mem_reliable.h       | 14 ++----
 mm/mem_reliable.c                  | 73 ++++++++++--------------------
 mm/page_alloc.c                    |  6 ++-
 4 files changed, 34 insertions(+), 62 deletions(-)
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index f6783bb99e3f..6ae531ee4de9 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -971,6 +971,7 @@ varies by architecture and compile options. The following is from a
     ShmemPmdMapped:         0 kB
     ReliableTotal:    7340032 kB
     ReliableUsed:      418824 kB
+    ReliableBuddyMem:  418824 kB

 MemTotal
               Total usable RAM (i.e. physical RAM minus a few reserved
@@ -1104,6 +1105,8 @@ ReliableTotal
               Total reliable memory size
 ReliableUsed
               The used amount of reliable memory
+ReliableBuddyMem
+              Size of unused mirrored memory in buddy system

 vmallocinfo
 ~~~~~~~~~~~
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index b75feac5e33c..7b22229068f1 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -15,9 +15,9 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);
 extern bool reliable_enabled;
 extern bool shmem_reliable;

-extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
-			      unsigned long *zone_movable_pfn);
+			      unsigned long *zone_movable_pfn,
+			      unsigned long mirrored_sz);
 extern void shmem_reliable_init(void);
 extern void reliable_report_meminfo(struct seq_file *m);
 extern void page_cache_prepare_alloc(gfp_t *gfp);
@@ -28,11 +28,6 @@ static inline bool mem_reliable_is_enabled(void)
 	return static_branch_likely(&mem_reliable);
 }

-static inline bool zone_reliable(struct zone *zone)
-{
-	return mem_reliable_is_enabled() && zone_idx(zone) < ZONE_MOVABLE;
-}
-
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
 	if (!mem_reliable_is_enabled())
@@ -59,11 +54,10 @@ static inline bool shmem_reliable_is_enabled(void)
 #define reliable_enabled 0

 static inline bool mem_reliable_is_enabled(void) { return false; }
-static inline void add_reliable_mem_size(long sz) {}
 static inline void mem_reliable_init(bool has_unmirrored_mem,
-				     unsigned long *zone_movable_pfn) {}
+				     unsigned long *zone_movable_pfn,
+				     unsigned long mirrored_sz) {}
 static inline void shmem_reliable_init(void) {}
-static inline bool zone_reliable(struct zone *zone) { return false; }
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
 	return false;
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index d46fe86563bd..876335fc4060 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -8,12 +8,12 @@
 #include <linux/seq_file.h>
 #include <linux/mmzone.h>

+#define PAGES_TO_B(n_pages) ((n_pages) << PAGE_SHIFT)
+
 DEFINE_STATIC_KEY_FALSE(mem_reliable);
 EXPORT_SYMBOL_GPL(mem_reliable);

 bool reliable_enabled;
-
-static atomic_long_t total_reliable_mem;
 bool shmem_reliable __read_mostly = true;

 bool mem_reliable_status(void)
@@ -28,62 +28,42 @@ void page_cache_prepare_alloc(gfp_t *gfp)
 	*gfp |= GFP_RELIABLE;
 }

-void add_reliable_mem_size(long sz)
-{
-	atomic_long_add(sz, &total_reliable_mem);
-}
-
-static unsigned long total_reliable_mem_sz(void)
-{
-	return atomic_long_read(&total_reliable_mem);
-}
-
-static unsigned long used_reliable_mem_sz(void)
+static unsigned long total_reliable_pages(void)
 {
-	unsigned long nr_page = 0;
+	unsigned long total_reliable_pages = 0;
 	struct zone *z;

 	for_each_populated_zone(z)
 		if (zone_idx(z) < ZONE_MOVABLE)
-			nr_page += zone_page_state(z, NR_FREE_PAGES);
+			total_reliable_pages += zone_managed_pages(z);

-	return total_reliable_mem_sz() - nr_page * PAGE_SIZE;
+	return total_reliable_pages;
 }

-static int reliable_mem_notifier(struct notifier_block *nb,
-				 unsigned long action, void *arg)
+static unsigned long free_reliable_pages(void)
 {
-	struct memory_notify *m_arg = arg;
 	struct zone *zone;
+	unsigned long cnt = 0;

-	switch (action) {
-	case MEM_ONLINE:
-		zone = page_zone(pfn_to_page(m_arg->start_pfn));
-		if (zone_reliable(zone))
-			add_reliable_mem_size(m_arg->nr_pages * PAGE_SIZE);
-		break;
-	case MEM_OFFLINE:
-		zone = page_zone(pfn_to_page(m_arg->start_pfn));
-		if (zone_reliable(zone))
-			add_reliable_mem_size(-m_arg->nr_pages * PAGE_SIZE);
-		break;
-	default:
-		break;
-	}
+	for_each_populated_zone(zone)
+		if (zone_idx(zone) < ZONE_MOVABLE)
+			cnt += zone_page_state(zone, NR_FREE_PAGES);

-	return NOTIFY_OK;
+	return cnt;
 }

-static struct notifier_block reliable_notifier_block = {
-	.notifier_call = reliable_mem_notifier,
-};
+static unsigned long used_reliable_pages(void)
+{
+	return total_reliable_pages() - free_reliable_pages();
+}

-void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
+void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn,
+		       unsigned long mirrored_sz)
 {
 	if (!reliable_enabled)
 		return;

-	if (atomic_long_read(&total_reliable_mem) == 0) {
+	if (!mirrored_sz) {
 		memset(zone_movable_pfn, 0,
 		       sizeof(unsigned long) * MAX_NUMNODES);
 		pr_err("init failed, mirrored memory size is zero.\n");
@@ -95,15 +75,9 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
 		return;
 	}

-	if (register_hotmemory_notifier(&reliable_notifier_block)) {
-		pr_err("init failed, register memory notifier failed.\n");
-		return;
-	}
-
 	static_branch_enable(&mem_reliable);

-	pr_info("init succeed, mirrored memory size(%lu)\n",
-		total_reliable_mem_sz());
+	pr_info("init succeed, mirrored memory size(%lu)\n", mirrored_sz);
 }

 void shmem_reliable_init(void)
@@ -123,8 +97,7 @@ void reliable_report_meminfo(struct seq_file *m)
 	if (!mem_reliable_is_enabled())
 		return;

-	show_val_kb(m, "ReliableTotal: ",
-		    total_reliable_mem_sz() >> PAGE_SHIFT);
-	show_val_kb(m, "ReliableUsed: ",
-		    used_reliable_mem_sz() >> PAGE_SHIFT);
+	show_val_kb(m, "ReliableTotal: ", total_reliable_pages());
+	show_val_kb(m, "ReliableUsed: ", used_reliable_pages());
+	show_val_kb(m, "ReliableBuddyMem: ", free_reliable_pages());
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff6fffec8770..732304f03fda 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7525,10 +7525,11 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	if (mirrored_kernelcore) {
 		bool mem_below_4gb_not_mirrored = false;
 		bool has_unmirrored_mem = false;
+		unsigned long mirrored_sz = 0;

 		for_each_mem_region(r) {
 			if (memblock_is_mirror(r)) {
-				add_reliable_mem_size(r->size);
+				mirrored_sz += r->size;
 				continue;
 			}
@@ -7550,7 +7551,8 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 		if (mem_below_4gb_not_mirrored)
 			pr_warn("This configuration results in unmirrored kernel memory.\n");

-		mem_reliable_init(has_unmirrored_mem, zone_movable_pfn);
+		mem_reliable_init(has_unmirrored_mem, zone_movable_pfn,
+				  mirrored_sz);

 		goto out2;
 	}