From: James Morse <james.morse@arm.com>
Register and enable error IRQs. All the MPAM error interrupts indicate a software bug, e.g. an out-of-range partid. If the error interrupt is ever signalled, attempt to disable MPAM.
Only the irq handler accesses the ESR register, so no locking is needed. The work to disable MPAM after an error needs to happen in process context, so use a threaded interrupt.
There is no support for percpu threaded interrupts, so for now schedule the work to be done from the irq handler.
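As an illustration of that split (a minimal sketch only; the example_* names are invented here, not taken from the driver):

  #include <linux/interrupt.h>
  #include <linux/irqdesc.h>
  #include <linux/workqueue.h>

  static void example_disable_fn(struct work_struct *work)
  {
  	/* Process-context teardown would happen here */
  }
  static DECLARE_WORK(example_broken_work, example_disable_fn);

  static irqreturn_t example_error_handler(int irq, void *dev_id)
  {
  	if (irq_is_percpu(irq)) {
  		/* No percpu threaded irqs: defer to a workqueue */
  		schedule_work(&example_broken_work);
  		return IRQ_HANDLED;
  	}

  	/* SPI: run the thread_fn registered for this irq in process context */
  	return IRQ_WAKE_THREAD;
  }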
Enabling the IRQs in the MSC may involve cross-calling to a CPU that can access the MSC.
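That pattern looks roughly like this (a sketch, assuming a helper along the lines of the driver's mpam_touch_msc(); the example_* name is invented):

  #include <linux/cpumask.h>
  #include <linux/smp.h>

  /*
   * Run fn on a CPU that can access the MSC. smp_call_function_any()
   * runs fn locally if the calling CPU is in the mask, and cross-calls
   * to one CPU from the mask otherwise; wait=true makes it synchronous.
   */
  static int example_touch_msc(const cpumask_t *accessibility,
  			     smp_call_func_t fn, void *arg)
  {
  	return smp_call_function_any(accessibility, fn, arg, true);
  }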
CC: Rohit Mathew <rohit.mathew@arm.com>
Tested-by: Rohit Mathew <rohit.mathew@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
---
 drivers/platform/mpam/mpam_devices.c  | 311 +++++++++++++++++++++++++-
 drivers/platform/mpam/mpam_internal.h |   8 +
 2 files changed, 309 insertions(+), 10 deletions(-)
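For reference, a standalone sketch of the percpu dev_id plumbing that the PPI path below relies on (all example_* names are illustrative):

  #include <linux/interrupt.h>
  #include <linux/percpu.h>

  struct example_msc { int id; };

  /* One slot per CPU, each pointing back at the MSC that owns the PPI */
  static struct example_msc * __percpu *example_dev_id;

  static irqreturn_t example_ppi_handler(int irq, void *dev_id)
  {
  	/* request_percpu_irq() hands the handler this CPU's slot */
  	struct example_msc *msc = *(struct example_msc **)dev_id;

  	return msc ? IRQ_HANDLED : IRQ_NONE;
  }

  static int example_setup_ppi(int irq, struct example_msc *msc)
  {
  	int cpu;

  	example_dev_id = alloc_percpu(struct example_msc *);
  	if (!example_dev_id)
  		return -ENOMEM;

  	for_each_possible_cpu(cpu)
  		*per_cpu_ptr(example_dev_id, cpu) = msc;

  	return request_percpu_irq(irq, example_ppi_handler,
  				  "example:error", example_dev_id);
  }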
diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index 6091e87308ba..ac82c041999d 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -14,6 +14,9 @@
 #include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/mutex.h>
@@ -64,6 +67,12 @@ static DEFINE_SPINLOCK(partid_max_lock);
  */
 static DECLARE_WORK(mpam_enable_work, &mpam_enable);
 
+/*
+ * All mpam error interrupts indicate a software bug. On receipt, disable the
+ * driver.
+ */
+static DECLARE_WORK(mpam_broken_work, &mpam_disable);
+
 /*
  * An MSC is a container for resources, each identified by their RIS index.
  * Components are a group of RIS that control the same thing.
@@ -127,6 +136,24 @@ static u64 mpam_msc_read_idr(struct mpam_msc *msc)
 	return (idr_high << 32) | idr_low;
 }
 
+static void mpam_msc_zero_esr(struct mpam_msc *msc)
+{
+	writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR);
+	if (msc->has_extd_esr)
+		writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR + 4);
+}
+
+static u64 mpam_msc_read_esr(struct mpam_msc *msc)
+{
+	u64 esr_high = 0, esr_low;
+
+	esr_low = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR);
+	if (msc->has_extd_esr)
+		esr_high = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR + 4);
+
+	return (esr_high << 32) | esr_low;
+}
+
 static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc)
 {
 	u32 partsel;
@@ -622,6 +649,7 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc)
 		pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);
 		msc->partid_max = min(msc->partid_max, partid_max);
 		msc->pmg_max = min(msc->pmg_max, pmg_max);
+		msc->has_extd_esr = FIELD_GET(MPAMF_IDR_HAS_EXT_ESR, idr);
 
 		ris = mpam_get_or_create_ris(msc, ris_idx);
 		if (IS_ERR(ris)) {
@@ -764,6 +792,12 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online)
 	srcu_read_unlock(&mpam_srcu, idx);
 }
 
+static void _enable_percpu_irq(void *_irq)
+{
+	int *irq = _irq;
+
+	enable_percpu_irq(*irq, IRQ_TYPE_NONE);
+}
+
 static int mpam_cpu_online(unsigned int cpu)
 {
 	int idx;
@@ -774,11 +808,13 @@ static int mpam_cpu_online(unsigned int cpu)
 		if (!cpumask_test_cpu(cpu, &msc->accessibility))
 			continue;
 
-		if (atomic_fetch_inc(&msc->online_refs) == 0) {
-			mutex_lock(&msc->lock);
+		mutex_lock(&msc->lock);
+		if (msc->reenable_error_ppi)
+			_enable_percpu_irq(&msc->reenable_error_ppi);
+
+		if (atomic_fetch_inc(&msc->online_refs) == 0)
 			mpam_reset_msc(msc, true);
-			mutex_unlock(&msc->lock);
-		}
+		mutex_unlock(&msc->lock);
 	}
 	srcu_read_unlock(&mpam_srcu, idx);
@@ -828,11 +864,13 @@ static int mpam_cpu_offline(unsigned int cpu)
 		if (!cpumask_test_cpu(cpu, &msc->accessibility))
 			continue;
 
-		if (atomic_dec_and_test(&msc->online_refs)) {
-			mutex_lock(&msc->lock);
+		mutex_lock(&msc->lock);
+		if (msc->reenable_error_ppi)
+			disable_percpu_irq(msc->reenable_error_ppi);
+
+		if (atomic_dec_and_test(&msc->online_refs))
 			mpam_reset_msc(msc, false);
-			mutex_unlock(&msc->lock);
-		}
+		mutex_unlock(&msc->lock);
 	}
 	srcu_read_unlock(&mpam_srcu, idx);
@@ -851,6 +889,50 @@ static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online))
 	mutex_unlock(&mpam_cpuhp_state_lock);
 }
 
+static int __setup_ppi(struct mpam_msc *msc)
+{
+	int cpu;
+
+	msc->error_dev_id = alloc_percpu_gfp(struct mpam_msc *, GFP_KERNEL);
+	if (!msc->error_dev_id)
+		return -ENOMEM;
+
+	for_each_cpu(cpu, &msc->accessibility) {
+		struct mpam_msc *empty = *per_cpu_ptr(msc->error_dev_id, cpu);
+
+		if (empty != NULL) {
+			pr_err_once("%s shares PPI with %s!\n",
+				    dev_name(&msc->pdev->dev),
+				    dev_name(&empty->pdev->dev));
+			return -EBUSY;
+		}
+		*per_cpu_ptr(msc->error_dev_id, cpu) = msc;
+	}
+
+	return 0;
+}
+
+static int mpam_msc_setup_error_irq(struct mpam_msc *msc)
+{
+	int irq;
+
+	irq = platform_get_irq_byname_optional(msc->pdev, "error");
+	if (irq <= 0)
+		return 0;
+
+	/* Allocate and initialise the percpu device pointer for PPI */
+	if (irq_is_percpu(irq))
+		return __setup_ppi(msc);
+
+	/* sanity check: shared interrupts can be routed anywhere? */
+	if (!cpumask_equal(&msc->accessibility, cpu_possible_mask)) {
+		pr_err_once("msc:%u is a private resource with a shared error interrupt",
+			    msc->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int mpam_dt_count_msc(void)
 {
 	int count = 0;
@@ -1021,6 +1103,13 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 		spin_lock_init(&msc->part_sel_lock);
 		spin_lock_init(&msc->mon_sel_lock);
 
+		err = mpam_msc_setup_error_irq(msc);
+		if (err) {
+			devm_kfree(&pdev->dev, msc);
+			msc = ERR_PTR(err);
+			break;
+		}
+
 		if (device_property_read_u32(&pdev->dev, "pcc-channel",
 					     &msc->pcc_subspace_id))
 			msc->iface = MPAM_IFACE_MMIO;
@@ -1173,11 +1262,198 @@ static void mpam_enable_merge_features(void)
 	}
 }
 
+static char *mpam_errcode_names[16] = {
+	[0]	= "No error",
+	[1]	= "PARTID_SEL_Range",
+	[2]	= "Req_PARTID_Range",
+	[3]	= "MSMONCFG_ID_RANGE",
+	[4]	= "Req_PMG_Range",
+	[5]	= "Monitor_Range",
+	[6]	= "intPARTID_Range",
+	[7]	= "Unexpected_INTERNAL",
+	[8]	= "Undefined_RIS_PART_SEL",
+	[9]	= "RIS_No_Control",
+	[10]	= "Undefined_RIS_MON_SEL",
+	[11]	= "RIS_No_Monitor",
+	[12 ... 15] = "Reserved"
+};
+
+static int mpam_enable_msc_ecr(void *_msc)
+{
+	struct mpam_msc *msc = _msc;
+
+	writel_relaxed(1, msc->mapped_hwpage + MPAMF_ECR);
+
+	return 0;
+}
+
+static int mpam_disable_msc_ecr(void *_msc)
+{
+	struct mpam_msc *msc = _msc;
+
+	writel_relaxed(0, msc->mapped_hwpage + MPAMF_ECR);
+
+	return 0;
+}
+
+static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc)
+{
+	u64 reg;
+	u16 partid;
+	u8 errcode, pmg, ris;
+
+	if (WARN_ON_ONCE(!msc) ||
+	    WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
+					   &msc->accessibility)))
+		return IRQ_NONE;
+
+	reg = mpam_msc_read_esr(msc);
+
+	errcode = FIELD_GET(MPAMF_ESR_ERRCODE, reg);
+	if (!errcode)
+		return IRQ_NONE;
+
+	/* Clear level triggered irq */
+	mpam_msc_zero_esr(msc);
+
+	partid = FIELD_GET(MPAMF_ESR_PARTID_OR_MON, reg);
+	pmg = FIELD_GET(MPAMF_ESR_PMG, reg);
+	ris = FIELD_GET(MPAMF_ESR_RIS, reg);
+
+	pr_err("error irq from msc:%u '%s', partid:%u, pmg:%u, ris:%u\n",
+	       msc->id, mpam_errcode_names[errcode], partid, pmg, ris);
+
+	if (irq_is_percpu(irq)) {
+		mpam_disable_msc_ecr(msc);
+		schedule_work(&mpam_broken_work);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mpam_ppi_handler(int irq, void *dev_id)
+{
+	struct mpam_msc *msc = *(struct mpam_msc **)dev_id;
+
+	return __mpam_irq_handler(irq, msc);
+}
+
+static irqreturn_t mpam_spi_handler(int irq, void *dev_id)
+{
+	struct mpam_msc *msc = dev_id;
+
+	return __mpam_irq_handler(irq, msc);
+}
+
+static irqreturn_t mpam_disable_thread(int irq, void *dev_id);
+
+static int mpam_register_irqs(void)
+{
+	int err, irq;
+	struct mpam_msc *msc;
+
+	lockdep_assert_cpus_held();
+	lockdep_assert_held(&mpam_list_lock);
+
+	list_for_each_entry(msc, &mpam_all_msc, glbl_list) {
+		irq = platform_get_irq_byname_optional(msc->pdev, "error");
+		if (irq <= 0)
+			continue;
+
+		/* The MPAM spec says the interrupt can be SPI, PPI or LPI */
+		/* We anticipate sharing the interrupt with other MSCs */
+		if (irq_is_percpu(irq)) {
+			err = request_percpu_irq(irq, &mpam_ppi_handler,
+						 "mpam:msc:error",
+						 msc->error_dev_id);
+			if (err)
+				return err;
+
+			mutex_lock(&msc->lock);
+			msc->reenable_error_ppi = irq;
+			smp_call_function_many(&msc->accessibility,
+					       &_enable_percpu_irq, &irq,
+					       true);
+			mutex_unlock(&msc->lock);
+		} else {
+			err = devm_request_threaded_irq(&msc->pdev->dev, irq,
+							&mpam_spi_handler,
+							&mpam_disable_thread,
+							IRQF_SHARED,
+							"mpam:msc:error", msc);
+			if (err)
+				return err;
+		}
+
+		mutex_lock(&msc->lock);
+		msc->error_irq_requested = true;
+		mpam_touch_msc(msc, mpam_enable_msc_ecr, msc);
+		msc->error_irq_hw_enabled = true;
+		mutex_unlock(&msc->lock);
+	}
+
+	return 0;
+}
+
+static void mpam_unregister_irqs(void)
+{
+	int irq;
+	struct mpam_msc *msc;
+
+	cpus_read_lock();
+	/* take the lock as free_irq() can sleep */
+	mutex_lock(&mpam_list_lock);
+	list_for_each_entry(msc, &mpam_all_msc, glbl_list) {
+		irq = platform_get_irq_byname_optional(msc->pdev, "error");
+		if (irq <= 0)
+			continue;
+
+		mutex_lock(&msc->lock);
+		if (msc->error_irq_hw_enabled) {
+			mpam_touch_msc(msc, mpam_disable_msc_ecr, msc);
+			msc->error_irq_hw_enabled = false;
+		}
+
+		if (msc->error_irq_requested) {
+			if (irq_is_percpu(irq)) {
+				msc->reenable_error_ppi = 0;
+				free_percpu_irq(irq, msc->error_dev_id);
+			} else {
+				devm_free_irq(&msc->pdev->dev, irq, msc);
+			}
+			msc->error_irq_requested = false;
+		}
+		mutex_unlock(&msc->lock);
+	}
+	mutex_unlock(&mpam_list_lock);
+	cpus_read_unlock();
+}
+
 static void mpam_enable_once(void)
 {
+	int err;
+
+	/*
+	 * If all the MSC have been probed, enabling the IRQs happens next.
+	 * That involves cross-calling to a CPU that can reach the MSC, and
+	 * the locks must be taken in this order:
+	 */
+	cpus_read_lock();
 	mutex_lock(&mpam_list_lock);
 	mpam_enable_merge_features();
+
+	err = mpam_register_irqs();
+	if (err)
+		pr_warn("Failed to register irqs: %d\n", err);
+
 	mutex_unlock(&mpam_list_lock);
+	cpus_read_unlock();
+
+	if (err) {
+		schedule_work(&mpam_broken_work);
+		return;
+	}
 
 	mutex_lock(&mpam_cpuhp_state_lock);
 	cpuhp_remove_state(mpam_cpuhp_state);
@@ -1221,15 +1497,31 @@ static void mpam_reset_class(struct mpam_class *class)
  * All of MPAMs errors indicate a software bug, restore any modified
  * controls to their reset values.
  */
-void mpam_disable(void)
+static irqreturn_t mpam_disable_thread(int irq, void *dev_id)
 {
 	int idx;
 	struct mpam_class *class;
 
+	mutex_lock(&mpam_cpuhp_state_lock);
+	if (mpam_cpuhp_state) {
+		cpuhp_remove_state(mpam_cpuhp_state);
+		mpam_cpuhp_state = 0;
+	}
+	mutex_unlock(&mpam_cpuhp_state_lock);
+
+	mpam_unregister_irqs();
+
 	idx = srcu_read_lock(&mpam_srcu);
 	list_for_each_entry_rcu(class, &mpam_classes, classes_list)
 		mpam_reset_class(class);
 	srcu_read_unlock(&mpam_srcu, idx);
+
+	return IRQ_HANDLED;
+}
+
+void mpam_disable(struct work_struct *ignored)
+{
+	mpam_disable_thread(0, NULL);
 }
 
 /*
@@ -1243,7 +1535,6 @@ void mpam_enable(struct work_struct *work)
 	struct mpam_msc *msc;
 	bool all_devices_probed = true;
 
-	/* Have we probed all the hw devices? */
 	mutex_lock(&mpam_list_lock);
 	list_for_each_entry(msc, &mpam_all_msc, glbl_list) {
 		mutex_lock(&msc->lock);
diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h
index dd1c017c6e08..e81aed7e6be6 100644
--- a/drivers/platform/mpam/mpam_internal.h
+++ b/drivers/platform/mpam/mpam_internal.h
@@ -29,10 +29,17 @@ struct mpam_msc
 	struct pcc_mbox_chan	*pcc_chan;
 	u32			nrdy_usec;
 	cpumask_t		accessibility;
+	bool			has_extd_esr;
+
+	int			reenable_error_ppi;
+	struct mpam_msc * __percpu	*error_dev_id;
+
 	atomic_t		online_refs;
 
 	struct mutex		lock;
 	bool			probed;
+	bool			error_irq_requested;
+	bool			error_irq_hw_enabled;
 	u16			partid_max;
 	u8			pmg_max;
 	unsigned long		ris_idxs[128 / BITS_PER_LONG];
@@ -167,6 +174,7 @@ extern u8 mpam_pmg_max;
 
 /* Scheduled work callback to enable mpam once all MSC have been probed */
 void mpam_enable(struct work_struct *work);
+void mpam_disable(struct work_struct *work);
 
 /*
  * MPAM MSCs have the following register layout. See: