
This reverts commit 8bc23b6f7bfa279ec458d914788bec2eba15e327.
---
 arch/x86/kernel/cpu/resctrl/core.c        | 17 ++++++----
 arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 38 ++++++++++++++++++-----
 arch/x86/kernel/cpu/resctrl/internal.h    |  2 --
 arch/x86/kernel/cpu/resctrl/rdtgroup.c    | 12 +++++--
 4 files changed, 52 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 670519f48a1c..4edc2b9d53c4 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -362,8 +362,6 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
 {
 	struct rdt_domain *d;
 
-	lockdep_assert_cpus_held();
-
 	list_for_each_entry(d, &r->domains, list) {
 		/* Find the domain that contains this CPU */
 		if (cpumask_test_cpu(cpu, &d->cpu_mask))
@@ -380,11 +378,19 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
 
 void rdt_ctrl_update(void *arg)
 {
-	struct rdt_hw_resource *hw_res;
 	struct msr_param *m = arg;
+	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
+	struct rdt_resource *r = m->res;
+	int cpu = smp_processor_id();
+	struct rdt_domain *d;
 
-	hw_res = resctrl_to_arch_res(m->res);
-	hw_res->msr_update(m->dom, m, m->res);
+	d = get_domain_from_cpu(cpu, r);
+	if (d) {
+		hw_res->msr_update(d, m, r);
+		return;
+	}
+	pr_warn_once("cpu %d not found in any domain for resource %s\n",
+		     cpu, r->name);
 }
 
 /*
@@ -457,7 +463,6 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 	hw_dom->ctrl_val = dc;
 	setup_default_ctrlval(r, dc);
 
-	m.dom = d;
 	m.low = 0;
 	m.high = hw_res->num_closid;
 	hw_res->msr_update(d, &m, r);
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 15d916449e47..8bd717222f0c 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -277,6 +277,22 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
 	}
 }
 
+static bool apply_config(struct rdt_hw_domain *hw_dom,
+			 struct resctrl_staged_config *cfg, u32 idx,
+			 cpumask_var_t cpu_mask)
+{
+	struct rdt_domain *dom = &hw_dom->d_resctrl;
+
+	if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
+		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
+		hw_dom->ctrl_val[idx] = cfg->new_ctrl;
+
+		return true;
+	}
+
+	return false;
+}
+
 int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
 			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
 {
@@ -291,7 +307,6 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
 	hw_dom->ctrl_val[idx] = cfg_val;
 
 	msr_param.res = r;
-	msr_param.dom = d;
 	msr_param.low = idx;
 	msr_param.high = idx + 1;
 	hw_res->msr_update(d, &msr_param, r);
@@ -305,39 +320,48 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 	struct rdt_hw_domain *hw_dom;
 	struct msr_param msr_param;
 	enum resctrl_conf_type t;
+	cpumask_var_t cpu_mask;
 	struct rdt_domain *d;
 	u32 idx;
 
 	/* Walking r->domains, ensure it can't race with cpuhp */
 	lockdep_assert_cpus_held();
 
+	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	msr_param.res = NULL;
 	list_for_each_entry(d, &r->domains, list) {
 		hw_dom = resctrl_to_arch_dom(d);
-		msr_param.res = NULL;
 		for (t = 0; t < CDP_NUM_TYPES; t++) {
 			cfg = &hw_dom->d_resctrl.staged_config[t];
 			if (!cfg->have_new_ctrl)
 				continue;
 
 			idx = get_config_index(closid, t);
-			if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
+			if (!apply_config(hw_dom, cfg, idx, cpu_mask))
 				continue;
-			hw_dom->ctrl_val[idx] = cfg->new_ctrl;
 
 			if (!msr_param.res) {
 				msr_param.low = idx;
 				msr_param.high = msr_param.low + 1;
 				msr_param.res = r;
-				msr_param.dom = d;
 			} else {
 				msr_param.low = min(msr_param.low, idx);
 				msr_param.high = max(msr_param.high, idx + 1);
 			}
 		}
-		if (msr_param.res)
-			smp_call_function_any(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
 	}
 
+	if (cpumask_empty(cpu_mask))
+		goto done;
+
+	/* Update resource control msr on all the CPUs. */
+	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
+
+done:
+	free_cpumask_var(cpu_mask);
+
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index ab2d315f7a2e..1a8687f8073a 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -379,13 +379,11 @@ static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
 /**
  * struct msr_param - set a range of MSRs from a domain
  * @res:	The resource to use
- * @dom:	The domain to update
  * @low:	Beginning index from base MSR
  * @high:	End index
  */
 struct msr_param {
 	struct rdt_resource	*res;
-	struct rdt_domain	*dom;
 	u32			low;
 	u32			high;
 };
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 02f213f1c51c..011e17efb1a6 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2813,12 +2813,16 @@ static int reset_all_ctrls(struct rdt_resource *r)
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	struct rdt_hw_domain *hw_dom;
 	struct msr_param msr_param;
+	cpumask_var_t cpu_mask;
 	struct rdt_domain *d;
 	int i;
 
 	/* Walking r->domains, ensure it can't race with cpuhp */
 	lockdep_assert_cpus_held();
 
+	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+		return -ENOMEM;
+
 	msr_param.res = r;
 	msr_param.low = 0;
 	msr_param.high = hw_res->num_closid;
@@ -2830,13 +2834,17 @@ static int reset_all_ctrls(struct rdt_resource *r)
 	 */
 	list_for_each_entry(d, &r->domains, list) {
 		hw_dom = resctrl_to_arch_dom(d);
+		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
 
 		for (i = 0; i < hw_res->num_closid; i++)
 			hw_dom->ctrl_val[i] = r->default_ctrl;
-		msr_param.dom = d;
-		smp_call_function_any(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
 	}
 
+	/* Update CBM on all the CPUs in cpu_mask */
+	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
+
+	free_cpumask_var(cpu_mask);
+
 	return 0;
 }
 
-- 
2.25.1