From: Jason Gunthorpe <jgg@nvidia.com>
Add arm_smmu_domain_alloc_id(), the pair of the existing arm_smmu_domain_free_id(). Move all the xa_alloc/idr calls to here. Replace the function pointer in arm_smmu_domain_finalise() and call it from arm_smmu_share_asid().
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com> --- .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 5 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 52 +++++++++---------- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 2 + 3 files changed, 28 insertions(+), 31 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index b434e63f6556..6252c85b5480 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -456,10 +456,7 @@ static int arm_smmu_share_asid(struct arm_smmu_device *smmu, * the S2 :( Or we simply ignore BTM entirely as we are doing now. */ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) - return xa_alloc(&smmu->asid_map, &smmu_domain->asid, - smmu_domain, - XA_LIMIT(1, (1 << smmu->asid_bits) - 1), - GFP_KERNEL); + return arm_smmu_domain_alloc_id(smmu, smmu_domain);
/* At this point the caller ensures we have a mmget() */ smmu_domain->asid = arm64_mm_context_get(mm); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 960b070dedfd..70d60845e03b 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2499,6 +2499,30 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) return &smmu_domain->domain; }
+int arm_smmu_domain_alloc_id(struct arm_smmu_device *smmu, + struct arm_smmu_domain *smmu_domain) +{ + if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1 || + smmu_domain->domain.type == IOMMU_DOMAIN_SVA)) { + return xa_alloc(&smmu->asid_map, &smmu_domain->asid, + smmu_domain, + XA_LIMIT(1, (1 << smmu->asid_bits) - 1), + GFP_KERNEL); + } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) { + int vmid; + + /* Reserve VMID 0 for stage-2 bypass STEs */ + vmid = ida_alloc_range(&smmu->vmid_map, 1, + (1 << smmu->vmid_bits) - 1, GFP_KERNEL); + if (vmid < 0) + return vmid; + smmu_domain->vmid = vmid; + return 0; + } + WARN_ON(true); + return -EINVAL; +} + /* * Return the domain's ASID or VMID back to the allocator. All IDs in the * allocator do not have an IOTLB entries referencing them. @@ -2537,28 +2561,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) kfree(smmu_domain); }
-static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu, - struct arm_smmu_domain *smmu_domain) -{ - return xa_alloc(&smmu->asid_map, &smmu_domain->asid, smmu_domain, - XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); -} - -static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu, - struct arm_smmu_domain *smmu_domain) -{ - int vmid; - - /* Reserve VMID 0 for stage-2 bypass STEs */ - vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1, - GFP_KERNEL); - if (vmid < 0) - return vmid; - - smmu_domain->vmid = (u16)vmid; - return 0; -} - static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, struct arm_smmu_device *smmu) { @@ -2567,8 +2569,6 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, enum io_pgtable_fmt fmt; struct io_pgtable_cfg pgtbl_cfg; struct io_pgtable_ops *pgtbl_ops; - int (*finalise_stage_fn)(struct arm_smmu_device *smmu, - struct arm_smmu_domain *smmu_domain);
/* Restrict the stage to what we can actually support */ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) @@ -2582,13 +2582,11 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, ias = min_t(unsigned long, ias, VA_BITS); oas = smmu->ias; fmt = ARM_64_LPAE_S1; - finalise_stage_fn = arm_smmu_domain_finalise_s1; break; case ARM_SMMU_DOMAIN_S2: ias = smmu->ias; oas = smmu->oas; fmt = ARM_64_LPAE_S2; - finalise_stage_fn = arm_smmu_domain_finalise_s2; break; default: return -EINVAL; @@ -2619,7 +2617,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; smmu_domain->domain.geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu, smmu_domain); + ret = arm_smmu_domain_alloc_id(smmu, smmu_domain); if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); return ret; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 309d5685aa16..e4eef09ca1b3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -840,6 +840,8 @@ int arm_smmu_set_pasid(struct arm_smmu_master *master, void arm_smmu_remove_pasid(struct arm_smmu_master *master, struct arm_smmu_domain *smmu_domain, ioasid_t pasid);
+int arm_smmu_domain_alloc_id(struct arm_smmu_device *smmu, + struct arm_smmu_domain *smmu_domain); void arm_smmu_domain_free_id(struct arm_smmu_domain *smmu_domain); void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid); void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,