From: Jason Gunthorpe <jgg@nvidia.com>
Previous patches emptied these structs out; they now only hold the two different kinds of IOTLB cache tag (vmid/asid).
As the cache tag is now fully a property of the domain, and its lifecycle is tied to the domain's, remove the structs and inline the tags into struct arm_smmu_domain.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
 .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 20 +++++-----
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 37 ++++++++-----------
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 12 +----
 3 files changed, 28 insertions(+), 41 deletions(-)
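For quick reference while reading the diff, the resulting layout of struct
arm_smmu_domain is sketched below (unrelated members elided; the
arm-smmu-v3.h hunk at the end is the authoritative change):

	struct arm_smmu_domain {
		/* ... */
		enum arm_smmu_domain_stage stage;
		union {
			u32 asid;	/* IOTLB tag for S1 / SVA domains */
			u16 vmid;	/* IOTLB tag for S2 domains */
		};
		/* ... */
		struct iommu_domain domain;
		/* ... */
	};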
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index e35ae178a7b1..b434e63f6556 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -18,7 +18,7 @@ static int arm_smmu_realloc_s1_domain_asid(struct arm_smmu_device *smmu,
 					   struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_master_domain *master_domain;
-	u32 old_asid = smmu_domain->cd.asid;
+	u32 old_asid = smmu_domain->asid;
 	struct arm_smmu_cd target_cd;
 	unsigned long flags;
 	int ret;
@@ -37,7 +37,7 @@ static int arm_smmu_realloc_s1_domain_asid(struct arm_smmu_device *smmu,
 	 * and we achieve eventual consistency. For the brief period where the
 	 * old ASID is still in the CD entries it will become incoherent.
 	 */
-	ret = xa_alloc(&smmu->asid_map, &smmu_domain->cd.asid, smmu_domain,
+	ret = xa_alloc(&smmu->asid_map, &smmu_domain->asid, smmu_domain,
 		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
 	if (ret)
 		return ret;
@@ -177,7 +177,7 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 	}
 
 	if (!smmu_domain->btm_invalidation) {
-		ioasid_t asid = READ_ONCE(smmu_domain->cd.asid);
+		ioasid_t asid = READ_ONCE(smmu_domain->asid);
 
 		if (!size)
 			arm_smmu_tlb_inv_asid(smmu_domain->smmu, asid);
@@ -214,14 +214,14 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 		/* An SVA ASID never changes, no asid_lock required */
 		arm_smmu_make_sva_cd(&target, master, NULL,
-				     smmu_domain->cd.asid,
+				     smmu_domain->asid,
 				     smmu_domain->btm_invalidation);
 		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
 					&target);
 	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
+	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->asid);
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
 }
 
@@ -388,7 +388,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 	 * get reassigned
 	 */
 	arm_smmu_make_sva_cd(&target, master, smmu_domain->domain.mm,
-			     smmu_domain->cd.asid,
+			     smmu_domain->asid,
 			     smmu_domain->btm_invalidation);
 
 	ret = arm_smmu_set_pasid(master, smmu_domain, id, &target);
@@ -456,16 +456,16 @@ static int arm_smmu_share_asid(struct arm_smmu_device *smmu,
 	 * the S2 :( Or we simply ignore BTM entirely as we are doing now.
 	 */
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
-		return xa_alloc(&smmu->asid_map, &smmu_domain->cd.asid,
+		return xa_alloc(&smmu->asid_map, &smmu_domain->asid,
 				smmu_domain,
 				XA_LIMIT(1, (1 << smmu->asid_bits) - 1),
 				GFP_KERNEL);
 
 	/* At this point the caller ensures we have a mmget() */
-	smmu_domain->cd.asid = arm64_mm_context_get(mm);
+	smmu_domain->asid = arm64_mm_context_get(mm);
 
 	mutex_lock(&smmu->asid_lock);
-	old_s1_domain = xa_store(&smmu->asid_map, smmu_domain->cd.asid,
+	old_s1_domain = xa_store(&smmu->asid_map, smmu_domain->asid,
 				 smmu_domain, GFP_KERNEL);
 	if (xa_err(old_s1_domain)) {
 		ret = xa_err(old_s1_domain);
@@ -499,7 +499,7 @@ static int arm_smmu_share_asid(struct arm_smmu_device *smmu,
 	goto out_unlock;
 
 out_restore_s1:
-	xa_store(&smmu->asid_map, smmu_domain->cd.asid, old_s1_domain,
+	xa_store(&smmu->asid_map, smmu_domain->asid, old_s1_domain,
 		 GFP_KERNEL);
 out_put_asid:
 	arm64_mm_context_put(mm);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 69b2eaf22cba..960b070dedfd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1526,7 +1526,6 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
 			 struct arm_smmu_master *master,
 			 struct arm_smmu_domain *smmu_domain)
 {
-	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
 	const struct io_pgtable_cfg *pgtbl_cfg =
 		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
 	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
@@ -1551,7 +1550,7 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
 		CTXDESC_CD_0_R |
 		CTXDESC_CD_0_A |
 		CTXDESC_CD_0_ASET |
-		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
+		FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->asid)
 		);
 
 	if (master->smmu->features & ARM_SMMU_FEAT_HD)
@@ -1798,7 +1797,6 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
 					struct arm_smmu_domain *smmu_domain,
 					bool ats_enabled)
 {
-	struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
 	const struct io_pgtable_cfg *pgtbl_cfg =
 		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
 	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
@@ -1824,7 +1822,7 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
 		FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
 		FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
 	target->data[2] = cpu_to_le64(
-		FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+		FIELD_PREP(STRTAB_STE_2_S2VMID, smmu_domain->vmid) |
 		FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
 		STRTAB_STE_2_S2AA64 |
 #ifdef __BIG_ENDIAN
@@ -2295,10 +2293,10 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	 * careful, 007.
 	 */
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		arm_smmu_tlb_inv_asid(smmu, READ_ONCE(smmu_domain->cd.asid));
+		arm_smmu_tlb_inv_asid(smmu, READ_ONCE(smmu_domain->asid));
 	} else {
 		cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
-		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+		cmd.tlbi.vmid = smmu_domain->vmid;
 		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 	}
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
@@ -2390,10 +2388,10 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
 			     CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
-		cmd.tlbi.asid = smmu_domain->cd.asid;
+		cmd.tlbi.asid = smmu_domain->asid;
 	} else {
 		cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
-		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+		cmd.tlbi.vmid = smmu_domain->vmid;
 	}
 	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
 
@@ -2511,22 +2509,22 @@ void arm_smmu_domain_free_id(struct arm_smmu_domain *smmu_domain)
 
 	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1 ||
 	     smmu_domain->domain.type == IOMMU_DOMAIN_SVA) &&
-	    smmu_domain->cd.asid) {
-		arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid);
+	    smmu_domain->asid) {
+		arm_smmu_tlb_inv_asid(smmu, smmu_domain->asid);
 
 		/* Prevent SVA from touching the CD while we're freeing it */
 		mutex_lock(&smmu->asid_lock);
-		xa_erase(&smmu->asid_map, smmu_domain->cd.asid);
+		xa_erase(&smmu->asid_map, smmu_domain->asid);
 		mutex_unlock(&smmu->asid_lock);
 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2 &&
-		   smmu_domain->s2_cfg.vmid) {
+		   smmu_domain->vmid) {
 		struct arm_smmu_cmdq_ent cmd = {
 			.opcode = CMDQ_OP_TLBI_S12_VMALL,
-			.tlbi.vmid = smmu_domain->s2_cfg.vmid
+			.tlbi.vmid = smmu_domain->vmid
 		};
 
 		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
-		ida_free(&smmu->vmid_map, smmu_domain->s2_cfg.vmid);
+		ida_free(&smmu->vmid_map, smmu_domain->vmid);
 	}
 }
 
@@ -2542,9 +2540,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
 				       struct arm_smmu_domain *smmu_domain)
 {
-	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
-
-	return xa_alloc(&smmu->asid_map, &cd->asid, smmu_domain,
+	return xa_alloc(&smmu->asid_map, &smmu_domain->asid, smmu_domain,
 			XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
 }
 
@@ -2552,7 +2548,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
 				       struct arm_smmu_domain *smmu_domain)
 {
 	int vmid;
-	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
 
 	/* Reserve VMID 0 for stage-2 bypass STEs */
 	vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
@@ -2560,7 +2555,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
 	if (vmid < 0)
 		return vmid;
 
-	cfg->vmid = (u16)vmid;
+	smmu_domain->vmid = (u16)vmid;
 	return 0;
 }
 
@@ -3078,8 +3073,8 @@ int arm_smmu_set_pasid(struct arm_smmu_master *master,
 	 * caller set ASID under the lock in case it changed.
 	 */
 	cd->data[0] &= ~cpu_to_le64(CTXDESC_CD_0_ASID);
-	cd->data[0] |= cpu_to_le64(
-		FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->cd.asid));
+	cd->data[0] |=
+		cpu_to_le64(FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->asid));
 
 	arm_smmu_write_cd_entry(master, pasid, cdptr, cd);
 	arm_smmu_update_ste(master, sid_domain, state.want_ats);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 518c5f07a09b..309d5685aa16 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -628,10 +628,6 @@ struct arm_smmu_strtab_l1_desc {
 	dma_addr_t l2ptr_dma;
 };
 
-struct arm_smmu_ctx_desc {
-	u32 asid;
-};
-
 struct arm_smmu_l1_ctx_desc {
 	struct arm_smmu_cd *l2ptr;
 	dma_addr_t l2ptr_dma;
@@ -667,10 +663,6 @@ static inline bool
 arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
 {
 	return cd_table->used_ssids;
 }
 
-struct arm_smmu_s2_cfg {
-	u16 vmid;
-};
-
 struct arm_smmu_strtab_cfg {
 	union {
 		struct arm_smmu_ste *linear;
@@ -805,8 +797,8 @@ struct arm_smmu_domain {
 
 	enum arm_smmu_domain_stage stage;
 	union {
-		struct arm_smmu_ctx_desc cd;
-		struct arm_smmu_s2_cfg s2_cfg;
+		u32 asid;
+		u16 vmid;
 	};
 
 	struct iommu_domain domain;