From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8T2RT
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/...
---------------------------
Resetting RIS entries from the cpuhp callback is easy, as the callback occurs on the correct CPU. This won't be true for any other caller that wants to reset or configure an MSC.
Add a helper that schedules the provided function if necessary. Prevent the cpuhp callbacks from changing the MSC state by taking the cpuhp lock.
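For illustration only, not part of this patch: a minimal sketch of how a later, non-cpuhp caller might use the new helper. The function name example_reset_one_msc() is hypothetical, and it assumes msc->lock is the MSC mutex checked by the lockdep assertion in mpam_touch_msc():

  /*
   * Hypothetical caller, for illustration only: hold the cpuhp lock so
   * the cpuhp callbacks cannot change the MSC state underneath us, then
   * let mpam_touch_msc() run the reset on a CPU that can reach the MSC.
   */
  static int example_reset_one_msc(struct mpam_msc *msc,
  				 struct mpam_msc_ris *ris)
  {
  	int err;

  	cpus_read_lock();
  	mutex_lock(&msc->lock);
  	err = mpam_touch_msc(msc, &mpam_reset_ris, ris);
  	mutex_unlock(&msc->lock);
  	cpus_read_unlock();

  	return err;
  }

smp_call_on_cpu() sleeps, so any such caller must be preemptible with interrupts enabled, which is what the lockdep assertions in mpam_touch_msc() check.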
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Zeng Heng <zengheng4@huawei.com>
---
 drivers/platform/mpam/mpam_devices.c | 40 +++++++++++++++++++++++-----
 1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index e281e6e910e0..9156591fc4b9 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -699,21 +699,49 @@ static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid)
 	spin_unlock(&msc->part_sel_lock);
 }
 
-static void mpam_reset_ris(struct mpam_msc_ris *ris)
+/*
+ * Called via smp_call_on_cpu() to prevent migration, while still being
+ * pre-emptible.
+ */
+static int mpam_reset_ris(void *arg)
 {
 	u16 partid, partid_max;
-	struct mpam_msc *msc = ris->msc;
-
-	lockdep_assert_held(&msc->lock);
+	struct mpam_msc_ris *ris = arg;
 
 	if (ris->in_reset_state)
-		return;
+		return 0;
 
 	spin_lock(&partid_max_lock);
 	partid_max = mpam_partid_max;
 	spin_unlock(&partid_max_lock);
 	for (partid = 0; partid < partid_max; partid++)
 		mpam_reset_ris_partid(ris, partid);
+
+	return 0;
+}
+
+/*
+ * Get the preferred CPU for this MSC. If it is accessible from this CPU,
+ * this CPU is preferred. This can be preempted/migrated, it will only result
+ * in more work.
+ */
+static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, &msc->accessibility))
+		return cpu;
+
+	return cpumask_first_and(&msc->accessibility, cpu_online_mask);
+}
+
+static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg)
+{
+	lockdep_assert_irqs_enabled();
+	lockdep_assert_cpus_held();
+	lockdep_assert_held(&msc->lock);
+
+	return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true);
 }
 
 static void mpam_reset_msc(struct mpam_msc *msc, bool online)
@@ -725,7 +753,7 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online)
 
 	idx = srcu_read_lock(&mpam_srcu);
 	list_for_each_entry_rcu(ris, &msc->ris, msc_list) {
-		mpam_reset_ris(ris);
+		mpam_touch_msc(msc, &mpam_reset_ris, ris);
 
 		/*
 		 * Set in_reset_state when coming online. The reset state