From: James Morse <james.morse@arm.com>
resctrl's pseudo-lock has some copy-to-cache and measurement functions that are micro-architecture specific. pseudo_lock_fn() is not at all portable. Give these a 'resctrl_arch_' prefix so that they stay under arch/x86.
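For reference, the filesystem side then only refers to the arch prototypes. The declarations and the call site below are both taken from the diff, shown together here to illustrate the split:

    /* arch/x86/include/asm/resctrl.h */
    u64 resctrl_arch_get_prefetch_disable_bits(void);
    int resctrl_arch_pseudo_lock_fn(void *_rdtgrp);
    int resctrl_arch_measure_cycles_lat_fn(void *_plr);
    int resctrl_arch_measure_l2_residency(void *_plr);
    int resctrl_arch_measure_l3_residency(void *_plr);

    /* e.g. rdtgroup_pseudo_lock_create() starts the locking thread with: */
    thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, rdtgrp,
                                    cpu_to_node(plr->cpu),
                                    "pseudo_lock/%u", plr->cpu);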
Pseudo-lock doesn't work without these functions, so add a Kconfig symbol that lets an architecture opt out of pseudo-lock support. When the symbol is not selected, rdtgrp->mode can never become RDT_MODE_PSEUDO_LOCKED.
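To illustrate the intent (a sketch only, this helper is not part of the patch): with the symbol unselected the IS_ENABLED() checks are compile-time false, so any pseudo-lock test reduces to a constant and the locked state is unreachable:

    /*
     * Hypothetical helper, for illustration only: with
     * CONFIG_RESCTRL_FS_PSEUDO_LOCK=n this always returns false and the
     * pseudo-lock branches become dead code.
     */
    static bool rdtgroup_mode_is_pseudo_locked(struct rdtgroup *rdtgrp)
    {
            if (!IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK))
                    return false;

            return rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED;
    }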
Arm's cache-lockdown works in a very different way. As it isn't possible to disable prefetching or efficiently restrict the CPU's ability to pull lines into the cache, resctrl's pseudo-lock support isn't going to be used on arm.
Signed-off-by: James Morse <james.morse@arm.com>
---
 arch/x86/Kconfig                          |  7 ++++
 arch/x86/include/asm/resctrl.h            |  5 +++
 arch/x86/kernel/cpu/resctrl/ctrlmondata.c |  3 +-
 arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 48 +++++++++++++++--------
 arch/x86/kernel/cpu/resctrl/rdtgroup.c    | 17 +++++---
 5 files changed, 56 insertions(+), 24 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 66bfabae8814..d9434d0d99b4 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -480,6 +480,7 @@ config X86_CPU_RESCTRL
         depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
         select KERNFS
         select PROC_CPU_RESCTRL if PROC_FS
+        select RESCTRL_FS_PSEUDO_LOCK
         help
           Enable x86 CPU resource control support.

@@ -496,6 +497,12 @@ config X86_CPU_RESCTRL

           Say N if unsure.

+config RESCTRL_FS_PSEUDO_LOCK
+        bool
+        help
+          Software mechanism to try and pin data in a cache portion using
+          micro-architecture tricks.
+
 if X86_32
 config X86_BIGSMP
         bool "Support for big SMP systems with more than 8 CPUs"
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index 50407e83d0ca..a88af68f9fe2 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -211,6 +211,11 @@ static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid
 static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
                                              void *ctx) { };

+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_rdtgrp);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
 void resctrl_cpu_detect(struct cpuinfo_x86 *c);

 #else
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 0a5a945ca368..215386d031ef 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -174,7 +174,8 @@ static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
         if (!cbm_validate(data->buf, &cbm_val, r))
                 return -EINVAL;

-        if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+        if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
+            (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
              rdtgrp->mode == RDT_MODE_SHAREABLE) &&
             rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
                 rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index f2315a50ea4f..09adf0e3e753 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -62,7 +62,8 @@ static const struct class pseudo_lock_class = {
 };

 /**
- * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
+ *                                          platforms
  * @void: It takes no parameters.
  *
  * Capture the list of platforms that have been validated to support
@@ -76,13 +77,13 @@ static const struct class pseudo_lock_class = {
  * in the SDM.
  *
  * When adding a platform here also add support for its cache events to
- * measure_cycles_perf_fn()
+ * resctrl_arch_measure_l*_residency()
  *
  * Return:
  * If platform is supported, the bits to disable hardware prefetchers, 0
  * if platform is not supported.
  */
-static u64 get_prefetch_disable_bits(void)
+u64 resctrl_arch_get_prefetch_disable_bits(void)
 {
         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
             boot_cpu_data.x86 != 6)
@@ -410,7 +411,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
 }

 /**
- * pseudo_lock_fn - Load kernel memory into cache
+ * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
  * @_rdtgrp: resource group to which pseudo-lock region belongs
  *
  * This is the core pseudo-locking flow.
@@ -428,7 +429,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
  *
  * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int pseudo_lock_fn(void *_rdtgrp)
+int resctrl_arch_pseudo_lock_fn(void *_rdtgrp)
 {
         struct rdtgroup *rdtgrp = _rdtgrp;
         struct pseudo_lock_region *plr = rdtgrp->plr;
@@ -714,7 +715,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
          * Not knowing the bits to disable prefetching implies that this
          * platform does not support Cache Pseudo-Locking.
          */
-        prefetch_disable_bits = get_prefetch_disable_bits();
+        prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits();
         if (prefetch_disable_bits == 0) {
                 rdt_last_cmd_puts("Pseudo-locking not supported\n");
                 return -EINVAL;
@@ -776,6 +777,9 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 {
         int ret;

+        if (!IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK))
+                return -EOPNOTSUPP;
+
         if (resctrl_arch_mon_capable()) {
                 ret = alloc_rmid(rdtgrp->closid);
                 if (ret < 0) {
@@ -848,6 +852,9 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
         /* Walking r->domains, ensure it can't race with cpuhp */
         lockdep_assert_cpus_held();

+        if (!IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK))
+                return false;
+
         if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
                 return true;

@@ -879,7 +886,8 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
 }

 /**
- * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
+ *                                      pseudo-locked memory
  * @_plr: pseudo-lock region to measure
  *
  * There is no deterministic way to test if a memory region is cached. One
@@ -892,7 +900,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
  *
  * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int measure_cycles_lat_fn(void *_plr)
+int resctrl_arch_measure_cycles_lat_fn(void *_plr)
 {
         struct pseudo_lock_region *plr = _plr;
         u32 saved_low, saved_high;
@@ -1076,7 +1084,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         return 0;
 }

-static int measure_l2_residency(void *_plr)
+int resctrl_arch_measure_l2_residency(void *_plr)
 {
         struct pseudo_lock_region *plr = _plr;
         struct residency_counts counts = {0};
@@ -1114,7 +1122,7 @@ static int measure_l2_residency(void *_plr)
         return 0;
 }

-static int measure_l3_residency(void *_plr)
+int resctrl_arch_measure_l3_residency(void *_plr)
 {
         struct pseudo_lock_region *plr = _plr;
         struct residency_counts counts = {0};
@@ -1212,18 +1220,18 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
         plr->cpu = cpu;

         if (sel == 1)
-                thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
-                                                cpu_to_node(cpu),
+                thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn,
+                                                plr, cpu_to_node(cpu),
                                                 "pseudo_lock_measure/%u",
                                                 cpu);
         else if (sel == 2)
-                thread = kthread_create_on_node(measure_l2_residency, plr,
-                                                cpu_to_node(cpu),
+                thread = kthread_create_on_node(resctrl_arch_measure_l2_residency,
+                                                plr, cpu_to_node(cpu),
                                                 "pseudo_lock_measure/%u",
                                                 cpu);
         else if (sel == 3)
-                thread = kthread_create_on_node(measure_l3_residency, plr,
-                                                cpu_to_node(cpu),
+                thread = kthread_create_on_node(resctrl_arch_measure_l3_residency,
+                                                plr, cpu_to_node(cpu),
                                                 "pseudo_lock_measure/%u",
                                                 cpu);
         else
@@ -1310,6 +1318,9 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
         struct device *dev;
         int ret;

+        if (!IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK))
+                return -EOPNOTSUPP;
+
         ret = pseudo_lock_region_alloc(plr);
         if (ret < 0)
                 return ret;
@@ -1322,7 +1333,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)

         plr->thread_done = 0;

-        thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
+        thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, rdtgrp,
                                         cpu_to_node(plr->cpu),
                                         "pseudo_lock/%u", plr->cpu);
         if (IS_ERR(thread)) {
@@ -1435,6 +1446,9 @@ void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
 {
         struct pseudo_lock_region *plr = rdtgrp->plr;

+        if (!IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK))
+                return;
+
         if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                 /*
                  * Default group cannot be a pseudo-locked region so we can
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 47d0b9a73c5c..4c1d3216b136 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1451,7 +1451,8 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
                         goto out;
                 }
                 rdtgrp->mode = RDT_MODE_EXCLUSIVE;
-        } else if (!strcmp(buf, "pseudo-locksetup")) {
+        } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
+                   !strcmp(buf, "pseudo-locksetup")) {
                 ret = rdtgroup_locksetup_enter(rdtgrp);
                 if (ret)
                         goto out;
@@ -2744,9 +2745,11 @@ static int rdt_get_tree(struct fs_context *fc)
                 rdtgroup_default.mon.mon_data_kn = kn_mondata;
         }

-        ret = rdt_pseudo_lock_init();
-        if (ret)
-                goto out_mondata;
+        if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK)) {
+                ret = rdt_pseudo_lock_init();
+                if (ret)
+                        goto out_mondata;
+        }

         ret = kernfs_get_tree(fc);
         if (ret < 0)
@@ -2769,7 +2772,8 @@ static int rdt_get_tree(struct fs_context *fc)
         goto out;

 out_psl:
-        rdt_pseudo_lock_release();
+        if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK))
+                rdt_pseudo_lock_release();
 out_mondata:
         if (resctrl_arch_mon_capable())
                 kernfs_remove(kn_mondata);
@@ -3034,7 +3038,8 @@ static void rdt_kill_sb(struct super_block *sb)
         resctrl_arch_reset_resources();

         rmdir_all_sub();
-        rdt_pseudo_lock_release();
+        if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK))
+                rdt_pseudo_lock_release();
         rdtgroup_default.mode = RDT_MODE_SHAREABLE;
         schemata_list_destroy();
         rdtgroup_destroy_root();