
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC8KS8
CVE: NA

--------------------------------

Introduce two tracepoints to improve observability of memory access
sampling and NUMA-aware page migration decisions:

- trace_mm_mem_sampling_access_record(): Logs the virtual and physical
  addresses of sampled memory accesses, along with the CPU and PID.
  Useful for tracking which pages are actively accessed and considered
  for NUMA balancing.

- trace_mm_numa_migrating(): Logs page migration activity triggered by
  NUMA balancing, including the logical address, source node,
  destination node, and migration outcome (via TNF_MIGRATED flag).

These tracepoints enable precise tracing of how sampled access patterns
influence memory placement and migration, aiding debugging, performance
analysis, and validation of NUMA policies that depend on hardware
access hints.

Signed-off-by: Ze Zuo <zuoze1@huawei.com>
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
Signed-off-by: Shuang Yan <yanshuang7@huawei.com>
---
 include/trace/events/kmem.h | 54 +++++++++++++++++++++++++++++++++++++
 mm/mem_sampling.c           |  4 +++
 2 files changed, 58 insertions(+)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 19b8ca352dde..423e3ba1b3d1 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -433,6 +433,60 @@ TRACE_EVENT(mm_spe_record,
 		  __entry->vaddr, __entry->paddr, __entry->pid)
 );
 #endif /* CONFIG_ARM_SPE_MEM_SAMPLING */
+
+
+#ifdef CONFIG_NUMABALANCING_MEM_SAMPLING
+TRACE_EVENT(mm_numa_migrating,
+
+	TP_PROTO(u64 vaddr, int page_nid, int target_nid,
+		 int migrate_success),
+
+	TP_ARGS(vaddr, page_nid, target_nid, migrate_success),
+
+	TP_STRUCT__entry(
+		__field(u64, vaddr)
+		__field(int, page_nid)
+		__field(int, target_nid)
+		__field(int, migrate_success)
+	),
+
+	TP_fast_assign(
+		__entry->vaddr = vaddr;
+		__entry->page_nid = page_nid;
+		__entry->target_nid = target_nid;
+		__entry->migrate_success = !!(migrate_success);
+	),
+
+	TP_printk("vaddr=%llu page_nid=%d target_nid=%d migrate_success=%d",
+		  __entry->vaddr, __entry->page_nid,
+		  __entry->target_nid, __entry->migrate_success)
+);
+
+TRACE_EVENT(mm_mem_sampling_access_record,
+
+	TP_PROTO(u64 vaddr, u64 paddr, int cpuid, int pid),
+
+	TP_ARGS(vaddr, paddr, cpuid, pid),
+
+	TP_STRUCT__entry(
+		__field(u64, vaddr)
+		__field(u64, paddr)
+		__field(int, cpuid)
+		__field(int, pid)
+	),
+
+	TP_fast_assign(
+		__entry->vaddr = vaddr;
+		__entry->paddr = paddr;
+		__entry->cpuid = cpuid;
+		__entry->pid = pid;
+	),
+
+	TP_printk("vaddr=%llu paddr=%llu cpuid=%d pid=%d",
+		  __entry->vaddr, __entry->paddr,
+		  __entry->cpuid, __entry->pid)
+);
+#endif /* CONFIG_NUMABALANCING_MEM_SAMPLING */
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
diff --git a/mm/mem_sampling.c b/mm/mem_sampling.c
index f6c84b2986f9..3550c71b3f3d 100644
--- a/mm/mem_sampling.c
+++ b/mm/mem_sampling.c
@@ -21,6 +21,7 @@
 #include <linux/mempolicy.h>
 #include <linux/task_work.h>
 #include <linux/migrate.h>
+#include <trace/events/kmem.h>
 #include <linux/sched/numa_balancing.h>
 
 #define MEM_SAMPLING_DISABLED	0x0
@@ -214,6 +215,7 @@ static void do_numa_access(struct task_struct *p, u64 laddr, u64 paddr)
 	}
 
 out:
+	trace_mm_numa_migrating(laddr, page_nid, target_nid, flags & TNF_MIGRATED);
 	if (page_nid != NUMA_NO_NODE)
 		task_numa_fault(last_cpupid, page_nid, 1, flags);
 
@@ -260,6 +262,8 @@ static void numa_balancing_mem_sampling_cb(struct mem_sampling_record *record)
 	if (p->pid != record->context_id)
 		return;
 
+	trace_mm_mem_sampling_access_record(vaddr, paddr, smp_processor_id(),
+					    current->pid);
 	numa_create_taskwork(vaddr, paddr, smp_processor_id());
 }
 
-- 
2.25.1