
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBN6HC

--------------------------------

Instead of broadcasting to all online CPUs, only inform the CPUs running
tasks of the group whose rmid is being changed. The detection is
inaccurate, as tasks might move or schedule before the smp function call
takes place; in that case the function call is pointless, but there is no
other side effect.

The IPI partid update has to be split into two calls, because
update_closid_rmid() writes the rdtgroup's partid into the pqr_state of
each CPU in the cpumask. Since the partid of the tasks has already been
updated successfully, the CPUs running them only need to call
mpam_sched_in().

Fixes: ed282cdb7305 ("[Huawei] arm64/mpam: Fix the potential deadlock when updating the rmid")
Signed-off-by: Zeng Heng <zengheng4@huawei.com>
---
 arch/arm64/kernel/mpam/mpam_resctrl.c | 29 ++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/mpam/mpam_resctrl.c b/arch/arm64/kernel/mpam/mpam_resctrl.c
index 220dc48e3614..8364b3f8b7b3 100644
--- a/arch/arm64/kernel/mpam/mpam_resctrl.c
+++ b/arch/arm64/kernel/mpam/mpam_resctrl.c
@@ -1914,10 +1914,14 @@ static ssize_t resctrl_group_rmid_write(struct kernfs_open_file *of,
 	int old_rmid;
 	int old_reqpartid;
 	struct task_struct *p, *t;
+	cpumask_var_t tmpmask;
 
 	if (kstrtoint(strstrip(buf), 0, &rmid) || rmid < 0)
 		return -EINVAL;
 
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
+
 	rdtgrp = resctrl_group_kn_lock_live(of->kn);
 	if (!rdtgrp) {
 		ret = -ENOENT;
@@ -1989,19 +1993,25 @@ static ssize_t resctrl_group_rmid_write(struct kernfs_open_file *of,
 				read_unlock(&tasklist_lock);
 				goto rollback;
 			}
+
+			if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
+				cpumask_set_cpu(task_cpu(t), tmpmask);
 		}
 	}
 	read_unlock(&tasklist_lock);
 
-	update_closid_rmid(cpu_online_mask, rdtgrp);
+	/* Update PARTID on CPUs which have moved tasks running on them */
+	update_closid_rmid(tmpmask, NULL);
+	/* Update PARTID on the cpu_list of the group */
+	update_closid_rmid(&rdtgrp->cpu_mask, rdtgrp);
+
 	rmid_free(old_rmid);
 
 unlock:
 	resctrl_group_kn_unlock(of->kn);
-	if (ret)
-		return ret;
+	free_cpumask_var(tmpmask);
 
-	return nbytes;
+	return ret ? : nbytes;
 
 rollback:
 	rdtgrp->mon.rmid = old_rmid;
@@ -2013,15 +2023,24 @@ static ssize_t resctrl_group_rmid_write(struct kernfs_open_file *of,
 	rdtgrp->resync = 1;
 	WARN_ON_ONCE(resctrl_update_groups_config(rdtgrp));
 
+	cpumask_clear(tmpmask);
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(p, t) {
-		if (t->closid == rdtgrp->closid.intpartid)
+		if (t->closid == rdtgrp->closid.intpartid) {
 			WARN_ON_ONCE(resctrl_group_update_task(t, rdtgrp));
+
+			if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
+				cpumask_set_cpu(task_cpu(t), tmpmask);
+		}
 	}
 	read_unlock(&tasklist_lock);
 
+	update_closid_rmid(tmpmask, NULL);
+
 	rmid_free(rmid);
 	resctrl_group_kn_unlock(of->kn);
+	free_cpumask_var(tmpmask);
 
 	return ret;
 }
-- 
2.25.1