Hi Eric,
Implement domain-selective, PASID-selective and page-selective
IOTLB invalidations.

Signed-off-by: Eric Auger <eric.auger@redhat.com>

---
v13 -> v14:
- Add domain invalidation
- do global inval when asid is not provided with addr granularity

v7 -> v8:
- ASID based invalidation using iommu_inv_pasid_info
- check ARCHID/PASID flags in addr based invalidation
- use __arm_smmu_tlb_inv_context and __arm_smmu_tlb_inv_range_nosync

v6 -> v7:
- check the uapi version

v3 -> v4:
- adapt to changes in the uapi
- add support for leaf parameter
- do not use arm_smmu_tlb_inv_range_nosync or arm_smmu_tlb_inv_context anymore

v2 -> v3:
- replace __arm_smmu_tlb_sync by arm_smmu_cmdq_issue_sync

v1 -> v2:
- properly pass the asid
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 74 +++++++++++++++++++++
 1 file changed, 74 insertions(+)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 4c19a1114de4..df3adc49111c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2949,6 +2949,79 @@ static void arm_smmu_detach_pasid_table(struct iommu_domain *domain)
 	mutex_unlock(&smmu_domain->init_mutex);
 }
 
+static int
+arm_smmu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+			  struct iommu_cache_invalidate_info *inv_info)
+{
+	struct arm_smmu_cmdq_ent cmd = {.opcode = CMDQ_OP_TLBI_NSNH_ALL};
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+	if (smmu_domain->stage != ARM_SMMU_DOMAIN_NESTED)
+		return -EINVAL;
+
+	if (!smmu)
+		return -EINVAL;
+
+	if (inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+		return -EINVAL;
+
+	if (inv_info->cache & IOMMU_CACHE_INV_TYPE_PASID ||
+	    inv_info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) {
+		return -ENOENT;
+	}
+
+	if (!(inv_info->cache & IOMMU_CACHE_INV_TYPE_IOTLB))
+		return -EINVAL;
+
+	/* IOTLB invalidation */
+
+	switch (inv_info->granularity) {
+	case IOMMU_INV_GRANU_PASID:
+	{
+		struct iommu_inv_pasid_info *info =
+			&inv_info->granu.pasid_info;
+
+		if (info->flags & IOMMU_INV_PASID_FLAGS_PASID)
+			return -ENOENT;
+		if (!(info->flags & IOMMU_INV_PASID_FLAGS_ARCHID))
+			return -EINVAL;
+
+		__arm_smmu_tlb_inv_context(smmu_domain, info->archid);
+		return 0;
+	}
+	case IOMMU_INV_GRANU_ADDR:
+	{
+		struct iommu_inv_addr_info *info = &inv_info->granu.addr_info;
+		size_t size = info->nb_granules * info->granule_size;
+		bool leaf = info->flags & IOMMU_INV_ADDR_FLAGS_LEAF;
+
+		if (info->flags & IOMMU_INV_ADDR_FLAGS_PASID)
+			return -ENOENT;
+
+		if (!(info->flags & IOMMU_INV_ADDR_FLAGS_ARCHID))
+			break;
+
+		arm_smmu_tlb_inv_range_domain(info->addr, size,
+					      info->granule_size, leaf,
+					      info->archid, smmu_domain);
+
+		arm_smmu_cmdq_issue_sync(smmu);
+		return 0;
+	}
+	case IOMMU_INV_GRANU_DOMAIN:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Global S1 invalidation */
+	cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
+	return 0;
+}
+
 static bool arm_smmu_dev_has_feature(struct device *dev,
 				     enum iommu_dev_features feat)
 {
@@ -3048,6 +3121,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.put_resv_regions	= generic_iommu_put_resv_regions,
 	.attach_pasid_table	= arm_smmu_attach_pasid_table,
 	.detach_pasid_table	= arm_smmu_detach_pasid_table,
+	.cache_invalidate	= arm_smmu_cache_invalidate,
 	.dev_has_feat		= arm_smmu_dev_has_feature,
 	.dev_feat_enabled	= arm_smmu_dev_feature_enabled,
 	.dev_enable_feat	= arm_smmu_dev_enable_feature,
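
For reference, the ASID-selective path added here expects the caller above
the IOMMU API (typically the VFIO layer acting for a vSMMU emulation) to fill
the uapi descriptor roughly as below. This is only an illustrative sketch:
the struct, field and flag names are taken from the uapi used by this series,
and the ASID value is made up.

	struct iommu_cache_invalidate_info inv_info = {
		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		.granularity	= IOMMU_INV_GRANU_PASID,
		.granu.pasid_info = {
			.flags	= IOMMU_INV_PASID_FLAGS_ARCHID,
			.archid	= 1,	/* guest ASID to invalidate */
		},
	};

With only ARCHID set, arm_smmu_cache_invalidate() takes the
IOMMU_INV_GRANU_PASID branch and invalidates the whole context for that ASID
via __arm_smmu_tlb_inv_context(). A range invalidation would instead use
IOMMU_INV_GRANU_ADDR with granu.addr_info (addr, granule_size, nb_granules
and the ARCHID/LEAF flags); if ARCHID is not provided there, the code falls
through to the global CMDQ_OP_TLBI_NSNH_ALL invalidation.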