From: Barry Song <v-songbaohua@oppo.com>
next inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9Q9DF
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
This helps to display the fragmentation situation of the swapfile: it shows what proportion of large folios we were able to swap out without splitting them. So far, we only support non-split swapout for anon memory, with the possibility of expanding to shmem in the future. So, we add the "anon" prefix to the counter names.
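For illustration only (not part of this patch): the new counters appear in the per-order mTHP stats directories under sysfs introduced earlier in this series, so they can be read per supported folio size, e.g. on a system with 64kB mTHP enabled (which hugepages-<size>kB directories exist depends on the architecture and configuration):

  cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/anon_swpout
  cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/anon_swpout_fallback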
Link: https://lkml.kernel.org/r/20240412114858.407208-3-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/huge_mm.h | 2 ++
 mm/huge_memory.c        | 4 ++++
 mm/page_io.c            | 1 +
 mm/vmscan.c             | 3 +++
 4 files changed, 10 insertions(+)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 0e87d2ebb541..3e5f7064e2de 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -265,6 +265,8 @@ enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_ALLOC,
 	MTHP_STAT_ANON_FAULT_FALLBACK,
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+	MTHP_STAT_ANON_SWPOUT,
+	MTHP_STAT_ANON_SWPOUT_FALLBACK,
 	__MTHP_STAT_COUNT
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d3980c66b0fc..763bb25e4f99 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -689,11 +689,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 
 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
 	&anon_fault_fallback_attr.attr,
 	&anon_fault_fallback_charge_attr.attr,
+	&anon_swpout_attr.attr,
+	&anon_swpout_fallback_attr.attr,
 	NULL,
 };
diff --git a/mm/page_io.c b/mm/page_io.c
index ea8d57b9b3ae..80e49e536d37 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -212,6 +212,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
 		count_memcg_folio_events(folio, THP_SWPOUT, 1);
 		count_vm_event(THP_SWPOUT);
 	}
+	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
 #endif
 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d8f2b571562c..34614bb7062d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1911,6 +1911,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 						goto activate_locked;
 				}
 				if (!add_to_swap(folio)) {
+					int __maybe_unused order = folio_order(folio);
+
 					if (!folio_test_large(folio))
 						goto activate_locked_split;
 					/* Fallback to swap normal pages */
@@ -1922,6 +1924,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 							THP_SWPOUT_FALLBACK, 1);
 						count_vm_event(THP_SWPOUT_FALLBACK);
 					}
+					count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 #endif
 					if (!add_to_swap(folio))
 						goto activate_locked_split;