From: Yosry Ahmed <yosryahmed@google.com>
mainline inclusion
from mainline-v6.8-rc1
commit e0bf1dc859fdd08ef738824710770a30a8069433
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9RPY4
CVE: NA
--------------------------------
The following patch will make use of those structs in the flushing code, so move their definitions (and a few other dependencies) a little bit up to reduce the diff noise in the following patch.
No functional change intended.
Link: https://lkml.kernel.org/r/20231129032154.3710765-3-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Tested-by: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Greg Thelen <gthelen@google.com>
Cc: Ivan Babrou <ivan@cloudflare.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Koutny <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Tejun Heo <tj@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Wei Xu <weixugc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflict: mm/memcontrol.c
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
---
 mm/memcontrol.c | 162 ++++++++++++++++++++++--------------------------
 1 file changed, 73 insertions(+), 89 deletions(-)
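
Note for reviewers (illustration only, not part of the patch): the block being moved implements a compact event-index scheme. Only the events listed in memcg_vm_event_stat[] get a per-memcg counter slot; init_memcg_events() fills a lookup table so that memcg_events_index() maps a full vm_event_item onto its compact slot, or -1 if the event is not tracked. A minimal userspace sketch of that scheme, using hypothetical demo_* names:

#include <stdio.h>

enum demo_vm_event {
	DEMO_PGPGIN,
	DEMO_PGPGOUT,
	DEMO_PGFAULT,
	DEMO_PGMAJFAULT,
	DEMO_NR_VM_EVENTS,
};

/* subset of events that get a per-cgroup counter slot */
static const unsigned int demo_event_stat[] = { DEMO_PGPGIN, DEMO_PGFAULT };
#define DEMO_NR_EVENTS (sizeof(demo_event_stat) / sizeof(demo_event_stat[0]))

/* maps every event to its compact slot + 1, or 0 if untracked */
static int demo_event_index[DEMO_NR_VM_EVENTS];

static void demo_init_events(void)
{
	for (unsigned int i = 0; i < DEMO_NR_EVENTS; i++)
		demo_event_index[demo_event_stat[i]] = i + 1;
}

/* returns the compact slot, or -1 if the event has no per-cgroup counter */
static int demo_events_index(enum demo_vm_event idx)
{
	return demo_event_index[idx] - 1;
}

int main(void)
{
	demo_init_events();
	printf("PGFAULT slot: %d\n", demo_events_index(DEMO_PGFAULT));  /* 1 */
	printf("PGPGOUT slot: %d\n", demo_events_index(DEMO_PGPGOUT));  /* -1 */
	return 0;
}

Storing "index + 1" in the lookup table means a zero-initialized entry reads back as -1 for untracked events, so no separate pass over the untracked slots is needed.
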
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 06cf3ff59540..c136d755bd1e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -601,6 +601,79 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 	return mz;
 }
 
+/* Subset of vm_event_item to report for memcg event stats */
+static const unsigned int memcg_vm_event_stat[] = {
+	PGPGIN,
+	PGPGOUT,
+	PGSCAN_KSWAPD,
+	PGSCAN_DIRECT,
+	PGSCAN_KHUGEPAGED,
+	PGSTEAL_KSWAPD,
+	PGSTEAL_DIRECT,
+	PGSTEAL_KHUGEPAGED,
+	PGFAULT,
+	PGMAJFAULT,
+	PGREFILL,
+	PGACTIVATE,
+	PGDEACTIVATE,
+	PGLAZYFREE,
+	PGLAZYFREED,
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+	ZSWPIN,
+	ZSWPOUT,
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	THP_FAULT_ALLOC,
+	THP_COLLAPSE_ALLOC,
+	THP_SWPOUT,
+	THP_SWPOUT_FALLBACK,
+#endif
+};
+
+#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
+static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+
+static void init_memcg_events(void)
+{
+	int i;
+
+	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
+		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
+}
+
+static inline int memcg_events_index(enum vm_event_item idx)
+{
+	return mem_cgroup_events_index[idx] - 1;
+}
+
+struct memcg_vmstats_percpu {
+	/* Local (CPU and cgroup) page state & events */
+	long			state[MEMCG_NR_STAT];
+	unsigned long		events[NR_MEMCG_EVENTS];
+
+	/* Delta calculation for lockless upward propagation */
+	long			state_prev[MEMCG_NR_STAT];
+	unsigned long		events_prev[NR_MEMCG_EVENTS];
+
+	/* Cgroup1: threshold notifications & softlimit tree updates */
+	unsigned long		nr_page_events;
+	unsigned long		targets[MEM_CGROUP_NTARGETS];
+};
+
+struct memcg_vmstats {
+	/* Aggregated (CPU and subtree) page state & events */
+	long			state[MEMCG_NR_STAT];
+	unsigned long		events[NR_MEMCG_EVENTS];
+
+	/* Non-hierarchical (CPU aggregated) page state & events */
+	long			state_local[MEMCG_NR_STAT];
+	unsigned long		events_local[NR_MEMCG_EVENTS];
+
+	/* Pending child counts during tree propagation */
+	long			state_pending[MEMCG_NR_STAT];
+	unsigned long		events_pending[NR_MEMCG_EVENTS];
+};
+
 /*
  * memcg and lruvec stats flushing
  *
@@ -712,95 +785,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
-/* Subset of vm_event_item to report for memcg event stats */
-static const unsigned int memcg_vm_event_stat[] = {
-	PGPGIN,
-	PGPGOUT,
-	PGSCAN_KSWAPD,
-	PGSCAN_DIRECT,
-	PGSCAN_KHUGEPAGED,
-	PGSTEAL_KSWAPD,
-	PGSTEAL_DIRECT,
-	PGSTEAL_KHUGEPAGED,
-	PGFAULT,
-	PGMAJFAULT,
-	PGREFILL,
-	PGACTIVATE,
-	PGDEACTIVATE,
-	PGLAZYFREE,
-	PGLAZYFREED,
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
-	ZSWPIN,
-	ZSWPOUT,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	THP_FAULT_ALLOC,
-	THP_COLLAPSE_ALLOC,
-	THP_SWPOUT,
-	THP_SWPOUT_FALLBACK,
-#endif
-};
-
-#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
-static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
-
-static void init_memcg_events(void)
-{
-	int i;
-
-	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
-		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
-}
-
-static inline int memcg_events_index(enum vm_event_item idx)
-{
-	return mem_cgroup_events_index[idx] - 1;
-}
-
-struct memcg_vmstats_percpu {
-	/* Local (CPU and cgroup) page state & events */
-	long			state[MEMCG_NR_STAT];
-	unsigned long		events[NR_MEMCG_EVENTS];
-
-	/* Delta calculation for lockless upward propagation */
-	long			state_prev[MEMCG_NR_STAT];
-	unsigned long		events_prev[NR_MEMCG_EVENTS];
-
-	/* Cgroup1: threshold notifications & softlimit tree updates */
-	unsigned long		nr_page_events;
-	unsigned long		targets[MEM_CGROUP_NTARGETS];
-	KABI_RESERVE(1)
-	KABI_RESERVE(2)
-	KABI_RESERVE(3)
-	KABI_RESERVE(4)
-	KABI_RESERVE(5)
-	KABI_RESERVE(6)
-	KABI_RESERVE(7)
-	KABI_RESERVE(8)
-};
-
-struct memcg_vmstats {
-	/* Aggregated (CPU and subtree) page state & events */
-	long			state[MEMCG_NR_STAT];
-	unsigned long		events[NR_MEMCG_EVENTS];
-
-	/* Non-hierarchical (CPU aggregated) page state & events */
-	long			state_local[MEMCG_NR_STAT];
-	unsigned long		events_local[NR_MEMCG_EVENTS];
-
-	/* Pending child counts during tree propagation */
-	long			state_pending[MEMCG_NR_STAT];
-	unsigned long		events_pending[NR_MEMCG_EVENTS];
-	KABI_RESERVE(1)
-	KABI_RESERVE(2)
-	KABI_RESERVE(3)
-	KABI_RESERVE(4)
-	KABI_RESERVE(5)
-	KABI_RESERVE(6)
-	KABI_RESERVE(7)
-	KABI_RESERVE(8)
-};
-
 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
 	long x = READ_ONCE(memcg->vmstats->state[idx]);
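
A second illustrative note on the fields being moved (a sketch, not the kernel implementation): memcg_vmstats_percpu keeps *_prev snapshots so a flush can compute what changed on each CPU since the last flush, and memcg_vmstats keeps *_pending slots where a child queues its aggregated delta for its parent to fold in. A simplified single-threaded sketch of that prev/pending bookkeeping, with hypothetical demo_* names and none of the rstat machinery:

#include <stdio.h>

#define DEMO_NR_CPUS 2

struct demo_vmstats_percpu {
	long state;        /* live per-CPU counter */
	long state_prev;   /* snapshot taken at the last flush */
};

struct demo_vmstats {
	long state;         /* subtree aggregate */
	long state_pending; /* delta queued by children, not yet folded in */
};

struct demo_memcg {
	struct demo_memcg *parent;
	struct demo_vmstats vmstats;
	struct demo_vmstats_percpu percpu[DEMO_NR_CPUS];
};

static void demo_flush(struct demo_memcg *memcg)
{
	long delta = memcg->vmstats.state_pending;

	memcg->vmstats.state_pending = 0;

	for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++) {
		struct demo_vmstats_percpu *pcpu = &memcg->percpu[cpu];
		long v = pcpu->state;

		delta += v - pcpu->state_prev;  /* change since the last flush */
		pcpu->state_prev = v;
	}

	memcg->vmstats.state += delta;
	if (memcg->parent)
		memcg->parent->vmstats.state_pending += delta;
}

int main(void)
{
	struct demo_memcg root = { 0 }, child = { .parent = &root };

	child.percpu[0].state = 5;   /* pretend CPU0 charged 5 pages */
	child.percpu[1].state = 3;   /* and CPU1 charged 3 */

	demo_flush(&child);          /* child aggregates 8, queues 8 on root */
	demo_flush(&root);           /* root folds the pending 8 into its state */
	printf("child=%ld root=%ld\n", child.vmstats.state, root.vmstats.state);
	return 0;
}

In the kernel the walk is driven by the cgroup rstat framework and the per-CPU counters are updated concurrently; the sketch only shows why both the per-CPU prev snapshots and the per-cgroup pending slots exist.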