From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8T2RT
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/...
---------------------------
resctrl's limbo code needs to be told when the data left in a cache is small enough for the partid+pmg value to be re-allocated.
x86 uses the cache size divided by the number of rmid users the cache may have. Do the same, but for the smallest cache, and with the number of partid-and-pmg users.
Querying the cache size can't happen until after cacheinfo_sysfs_init() has run, so mpam_resctrl_setup() must wait until then.
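For illustration, a minimal standalone sketch of that arithmetic (the 4MiB
cache size and the 512 unique partid+pmg values are made-up numbers, not
something the driver reports):

#include <stdio.h>

int main(void)
{
	unsigned int size = 4 * 1024 * 1024;	/* hypothetical smallest cache: 4MiB */
	unsigned int num_unique_pmg = 512;	/* hypothetical count of partid+pmg values */

	/* The same arithmetic update_rmid_limits() applies below */
	printf("realloc limit:     %u bytes\n", size);			/* 4194304 */
	printf("realloc threshold: %u bytes\n", size / num_unique_pmg);	/* 8192 */
	return 0;
}

A freed partid+pmg stays on the limbo list until the CSU monitor reports
less residual data than this threshold still allocated in the cache.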
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Zeng Heng <zengheng4@huawei.com>
---
 drivers/platform/mpam/mpam_resctrl.c | 51 ++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c
index 074d8ba32115..9e69e0e1bb07 100644
--- a/drivers/platform/mpam/mpam_resctrl.c
+++ b/drivers/platform/mpam/mpam_resctrl.c
@@ -15,6 +15,7 @@
 #include <linux/resctrl.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/wait.h>
 
 #include <asm/mpam.h>
 
@@ -47,6 +48,13 @@ static bool cdp_enabled;
  */
 static bool resctrl_enabled;
 
+/*
+ * mpam_resctrl_pick_caches() needs to know the size of the caches. cacheinfo
+ * populates this from a device_initcall(). mpam_resctrl_setup() must wait.
+ */
+static bool cacheinfo_ready;
+static DECLARE_WAIT_QUEUE_HEAD(wait_cacheinfo_ready);
+
 /* A dummy mon context to use when the monitors were allocated up front */
 u32 __mon_is_rmid_idx = USE_RMID_IDX;
 void *mon_is_rmid_idx = &__mon_is_rmid_idx;
@@ -402,6 +410,24 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
 	}
 }
 
+/*
+ * The rmid realloc threshold should be for the smallest cache exposed to
+ * resctrl.
+ */
+static void update_rmid_limits(unsigned int size)
+{
+	u32 num_unique_pmg = resctrl_arch_system_num_rmid_idx();
+
+	if (WARN_ON_ONCE(!size))
+		return;
+
+	if (resctrl_rmid_realloc_limit && size > resctrl_rmid_realloc_limit)
+		return;
+
+	resctrl_rmid_realloc_limit = size;
+	resctrl_rmid_realloc_threshold = size / num_unique_pmg;
+}
+
 static bool cache_has_usable_cpor(struct mpam_class *class)
 {
 	struct mpam_props *cprops = &class->props;
@@ -558,11 +584,15 @@ static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops)
 static void mpam_resctrl_pick_caches(void)
 {
 	int idx;
+	unsigned int cache_size;
 	struct mpam_class *class;
 	struct mpam_resctrl_res *res;
 
+	lockdep_assert_cpus_held();
+
 	idx = srcu_read_lock(&mpam_srcu);
 	list_for_each_entry_rcu(class, &mpam_classes, classes_list) {
+		struct mpam_props *cprops = &class->props;
 		bool has_cpor = cache_has_usable_cpor(class);
 
 		if (class->type != MPAM_CLASS_CACHE) {
@@ -589,6 +619,16 @@ static void mpam_resctrl_pick_caches(void)
 			continue;
 		}
 
+		/* Assume cache levels are the same size for all CPUs... */
+		cache_size = get_cpu_cacheinfo_size(smp_processor_id(), class->level);
+		if (!cache_size) {
+			pr_debug("pick_caches: Could not read cache size\n");
+			continue;
+		}
+
+		if (mpam_has_feature(mpam_feat_msmon_csu, cprops))
+			update_rmid_limits(cache_size);
+
 		if (class->level == 2) {
 			res = &mpam_resctrl_exports[RDT_RESOURCE_L2];
 			res->resctrl_res.name = "L2";
@@ -794,6 +834,8 @@ int mpam_resctrl_setup(void)
 	struct mpam_resctrl_res *res;
 	enum resctrl_res_level i;
 
+	wait_event(wait_cacheinfo_ready, cacheinfo_ready);
+
 	cpus_read_lock();
 	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
 		res = &mpam_resctrl_exports[i];
@@ -1156,3 +1198,12 @@ int mpam_resctrl_offline_cpu(unsigned int cpu)
 
 	return 0;
 }
+
+static int __init __cacheinfo_ready(void)
+{
+	cacheinfo_ready = true;
+	wake_up(&wait_cacheinfo_ready);
+
+	return 0;
+}
+device_initcall_sync(__cacheinfo_ready);
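For reference, the ordering the last hunk relies on: cacheinfo_sysfs_init()
is registered with device_initcall() (level 6), and _sync initcalls run only
after every plain initcall of the same level, so __cacheinfo_ready() cannot
fire before cacheinfo has populated the cache sizes. The relevant macros, as
they appear in include/linux/init.h:

#define device_initcall(fn)		__define_initcall(fn, 6)
#define device_initcall_sync(fn)	__define_initcall(fn, 6s)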