hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9GZAQ
CVE: NA
--------------------------------
The preceding patches introduce a standalone SPE driver and isolate the
basic SPE operations (interrupt handling, enabling/disabling, and so on)
from the perf driver. Users of SPE functionality therefore fall into two
categories: in-kernel subsystems such as NUMA balancing and DAMON, and
userspace via the perf driver. The first category starts and stops SPE
through the mem_sampling abstraction layer. In addition, to keep the
changes to the perf code as non-intrusive as possible, introduce the
cmdline option mem_sampling_on to select mem_sampling: when it is
enabled, perf cannot capture SPE records, and vice versa.
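
As a minimal usage sketch (illustration only, not part of the patch):
with CONFIG_MEM_SAMPLING enabled and the kernel booted with
mem_sampling_on on the command line, mem_sampling_support() returns
true and the two probe paths arbitrate SPE ownership roughly as
follows:

	/* drivers/arm/spe/spe.c: in-kernel SPE driver probe */
	if (!mem_sampling_support())
		return -EBUSY;	/* cmdline option absent: leave SPE to perf */

	/* drivers/perf/arm_spe_pmu.c: perf SPE PMU driver probe */
	if (mem_sampling_support())
		return -EBUSY;	/* mem_sampling owns the SPE hardware */

Without the cmdline option the perf SPE PMU driver probes as before and
the in-kernel mem_sampling path stays disabled.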
Signed-off-by: Ze Zuo <zuoze1@huawei.com>
---
 drivers/arm/spe/Kconfig      |  2 +-
 drivers/arm/spe/spe.c        | 13 +++++++++----
 drivers/perf/arm_spe_pmu.c   |  6 ++++++
 include/linux/mem_sampling.h |  6 ++++++
 mm/mem_sampling.c            | 15 +++++++++++++++
 5 files changed, 37 insertions(+), 5 deletions(-)
diff --git a/drivers/arm/spe/Kconfig b/drivers/arm/spe/Kconfig
index 5ede60666349..870b630b8a1d 100644
--- a/drivers/arm/spe/Kconfig
+++ b/drivers/arm/spe/Kconfig
@@ -4,7 +4,7 @@
 #
 config ARM_SPE
 	bool "In-kernel SPE for driver for page access profiling"
-	depends on ARM64 && !ARM_SPE_PMU
+	depends on ARM64
 	help
 	  Enable support for the ARMv8.2 Statistical Profiling Extension, which
 	  provides periodic sampling of operations in the CPU pipeline.
diff --git a/drivers/arm/spe/spe.c b/drivers/arm/spe/spe.c
index 1be13a0ed2d1..c476b1500269 100644
--- a/drivers/arm/spe/spe.c
+++ b/drivers/arm/spe/spe.c
@@ -700,6 +700,15 @@ static int arm_spe_device_probe(struct platform_device *pdev)
 	int ret;
 	struct device *dev = &pdev->dev;
 
+	/*
+	 * TODO: Find a clean way to disable SPE so that SPE
+	 * can be used for perf.
+	 */
+	if (!mem_sampling_support()) {
+		dev_warn_once(dev, "mem_sampling for spe is busy.");
+		return -EBUSY;
+	}
+
 	/*
 	 * If kernelspace is unmapped when running at EL0, then the SPE
 	 * buffer will fault and prematurely terminate the AUX session.
@@ -784,10 +793,6 @@ static int __init arm_spe_init(void)
 
 static void __exit arm_spe_exit(void)
 {
-	/*
-	 * TODO: Find a clean way to disable SPE so that SPE
-	 * can be used for perf.
-	 */
 	platform_driver_unregister(&arm_spe_driver);
 	cpuhp_remove_multi_state(arm_spe_online);
 	arm_spe_buffer_free();
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 2a4ebdd1ee78..f3421bcae951 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/vmalloc.h>
+#include <linux/mem_sampling.h>
 
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
@@ -1199,6 +1200,11 @@ static int arm_spe_pmu_device_probe(struct platform_device *pdev)
 	struct arm_spe_pmu *spe_pmu;
 	struct device *dev = &pdev->dev;
 
+	if (mem_sampling_support()) {
+		dev_warn_once(dev, "perf driver for spe is busy.");
+		return -EBUSY;
+	}
+
 	/*
 	 * If kernelspace is unmapped when running at EL0, then the SPE
 	 * buffer will fault and prematurely terminate the AUX session.
diff --git a/include/linux/mem_sampling.h b/include/linux/mem_sampling.h
index b27b5e1fd96e..8c520a03b1a0 100644
--- a/include/linux/mem_sampling.h
+++ b/include/linux/mem_sampling.h
@@ -59,6 +59,12 @@ void mem_sampling_sched_in(struct task_struct *prev, struct task_struct *curr);
 static inline void mem_sampling_sched_in(struct task_struct *prev, struct task_struct *curr) { };
 #endif
 
+#ifdef CONFIG_MEM_SAMPLING
+bool mem_sampling_support(void);
+#else
+#define mem_sampling_support() NULL
+#endif
+
 /* invoked by specific mem_sampling */
 typedef void (*mem_sampling_cb_type)(struct mem_sampling_record *record_base,
 				     int n_records);
diff --git a/mm/mem_sampling.c b/mm/mem_sampling.c
index 1dc813d1c157..7d268f19a1f2 100644
--- a/mm/mem_sampling.c
+++ b/mm/mem_sampling.c
@@ -26,6 +26,8 @@
 
 struct mem_sampling_ops_struct mem_sampling_ops;
 
+static int mem_sampling;
+
 #define MEM_SAMPLING_DISABLED		0x0
 #define MEM_SAMPLING_NORMAL		0x1
 #define NUMA_BALANCING_HW_DISABLED	0x0
@@ -96,6 +98,11 @@ void mem_sampling_sched_in(struct task_struct *prev, struct task_struct *curr)
 		mem_sampling_ops.sampling_stop();
 }
 
+bool mem_sampling_support(void)
+{
+	return mem_sampling;
+}
+
 static void mem_sampling_process(struct mem_sampling_record *record_base, int nr_records)
 {
 	int i;
@@ -428,6 +435,14 @@ static void __init check_mem_sampling_enable(void)
 	set_mem_sampling_state(mem_sampling_default);
 }
 
+static int __init mem_sampling_setup(char *str)
+{
+	mem_sampling = 1;
+	return 1;
+
+}
+__setup("mem_sampling_on", mem_sampling_setup);
+
 static int __init mem_sampling_init(void)
 {
 	enum mem_sampling_type_enum mem_sampling_type = mem_sampling_get_type();