From: Zhen Lei <thunder.leizhen@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8Z6DC
CVE: NA
-------------------------------------------------------------------------
The SYNC command only ensures that the commands preceding it in the same ECMDQ have been executed; it cannot synchronize commands inserted into other ECMDQs. If an unmap involves multiple commands and the task is migrated part-way through, some of the commands are inserted on one core and the rest on another core. In that case, completion of the SYNC does not guarantee that all of the preceding commands have been executed.
Preventing the process that inserts a set of associated commands from being migrated to another core ensures that all of the commands are inserted into the same ECMDQ.
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 39 ++++++++++++++++++---
 1 file changed, 35 insertions(+), 4 deletions(-)
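For reference, a minimal sketch of the pattern this patch applies at each batch-submission site in the diff below (the arm_smmu_preempt_disable()/arm_smmu_preempt_enable() helpers are the ones added by this patch; the surrounding variables n, cmd[] and ret are placeholders, not driver code):

	cmds.num = 0;

	/* Disables preemption only when ECMDQ is in use, keeping the task on one CPU */
	arm_smmu_preempt_disable(smmu);

	/* Every command of this operation lands in the same per-CPU ECMDQ */
	for (i = 0; i < n; i++)
		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd[i]);

	/* The trailing CMD_SYNC now covers all commands queued above */
	ret = arm_smmu_cmdq_batch_submit(smmu, &cmds);

	arm_smmu_preempt_enable(smmu);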
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index d0f670d74048..0f554a33673d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -296,6 +296,24 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
 	return 0;
 }
 
+#ifdef CONFIG_ARM_SMMU_V3_ECMDQ
+static void arm_smmu_preempt_disable(struct arm_smmu_device *smmu)
+{
+	if (smmu->ecmdq_enabled)
+		preempt_disable();
+}
+
+static void arm_smmu_preempt_enable(struct arm_smmu_device *smmu)
+{
+	if (smmu->ecmdq_enabled)
+		preempt_enable();
+}
+#else
+static void arm_smmu_preempt_disable(struct arm_smmu_device *smmu) {}
+static void arm_smmu_preempt_enable(struct arm_smmu_device *smmu) {}
+#endif
+
+
 /* High-level queue accessors */
 static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 {
@@ -1093,6 +1111,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 
 	cmds.num = 0;
 
+	arm_smmu_preempt_disable(smmu);
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
 		for (i = 0; i < master->num_streams; i++) {
@@ -1103,6 +1122,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
+	arm_smmu_preempt_enable(smmu);
 }
 
 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
@@ -1903,32 +1923,37 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
 
 static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 {
-	int i;
+	int i, ret;
 	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_cmdq_batch cmds;
 
 	arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);
 
 	cmds.num = 0;
+	arm_smmu_preempt_disable(master->smmu);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.atc.sid = master->streams[i].id;
 		arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
 	}
 
-	return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
+	ret = arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
+	arm_smmu_preempt_enable(master->smmu);
+	return ret;
 }
 
 int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 			    unsigned long iova, size_t size)
 {
-	int i;
+	int i, ret;
 	unsigned long flags;
 	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_master *master;
 	struct arm_smmu_cmdq_batch cmds;
 
+#ifdef CONFIG_ARM_SMMU_V3_ECMDQ
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
 		return 0;
+#endif
 
 	/*
 	 * Ensure that we've completed prior invalidation of the main TLBs
@@ -1951,6 +1976,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 
 	cmds.num = 0;
 
+	arm_smmu_preempt_disable(smmu_domain->smmu);
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
 		if (!master->ats_enabled)
@@ -1963,7 +1989,10 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-	return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
+	ret = arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
+	arm_smmu_preempt_enable(smmu_domain->smmu);
+
+	return ret;
 }
 
 /* IO_PGTABLE API */
@@ -2028,6 +2057,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 
 	cmds.num = 0;
 
+	arm_smmu_preempt_disable(smmu);
 	while (iova < end) {
 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
 			/*
@@ -2059,6 +2089,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		iova += inv_range;
 	}
 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
+	arm_smmu_preempt_enable(smmu);
 }
 
 static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,