From: Jason Gunthorpe <jgg@nvidia.com>
mainline inclusion
from mainline-v6.12-rc1
commit e3b1be2e73dbe599f8b8886e120d206aa87e90f9
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB4WDJ
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
The members here are being used for both the linear and the 2 level case, with the meaning of each item slightly different in the two cases.
Split it into a clean union where both cases have their own struct with their own logical names and correct types.
Adjust all the users to detect linear/2lvl and use the right sub structure and types consistently.
Remove CTXDESC_CD_DWORDS by changing the last places to use sizeof(struct arm_smmu_cd).
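For reference, the reorganized struct arm_smmu_ctx_desc_cfg ends up looking
roughly like this (sketch of the header hunk below; remaining members are
unchanged and omitted here):

	struct arm_smmu_ctx_desc_cfg {
		union {
			struct {
				struct arm_smmu_cd *table;
				unsigned int num_ents;
			} linear;
			struct {
				struct arm_smmu_cdtab_l1 *l1tab;
				struct arm_smmu_cdtab_l2 **l2ptrs;
				unsigned int num_l1_ents;
			} l2;
		};
		dma_addr_t cdtab_dma;
		/* remaining members (used_ssids, in_ste, s1fmt, s1cdmax, ...) unchanged */
	};

cd_table->s1fmt continues to select which side of the union is valid:
STRTAB_STE_0_S1FMT_LINEAR uses .linear, STRTAB_STE_0_S1FMT_64K_L2 uses .l2.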
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/8-v4-6416877274e1+1af-smmuv3_tidy_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 115 ++++++++++----------
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |  24 ++--
 2 files changed, 72 insertions(+), 67 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 062f9800c20d..f9966cf1f5dd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1421,19 +1421,19 @@ static dma_addr_t arm_smmu_cd_l1_get_desc(const struct arm_smmu_cdtab_l1 *src)
 struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
 					u32 ssid)
 {
-	struct arm_smmu_l1_ctx_desc *l1_desc;
+	struct arm_smmu_cdtab_l2 *l2;
 	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-	if (!cd_table->cdtab)
+	if (!arm_smmu_cdtab_allocated(cd_table))
 		return NULL;
 
 	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
-		return &((struct arm_smmu_cd *)cd_table->cdtab)[ssid];
+		return &cd_table->linear.table[ssid];
 
-	l1_desc = &cd_table->l1_desc[arm_smmu_cdtab_l1_idx(ssid)];
-	if (!l1_desc->l2ptr)
+	l2 = cd_table->l2.l2ptrs[arm_smmu_cdtab_l1_idx(ssid)];
+	if (!l2)
 		return NULL;
-	return &l1_desc->l2ptr->cds[arm_smmu_cdtab_l2_idx(ssid)];
+	return &l2->cds[arm_smmu_cdtab_l2_idx(ssid)];
 }
 
 static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
@@ -1445,30 +1445,25 @@ static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
 	might_sleep();
 	iommu_group_mutex_assert(master->dev);
 
-	if (!cd_table->cdtab) {
+	if (!arm_smmu_cdtab_allocated(cd_table)) {
 		if (arm_smmu_alloc_cd_tables(master))
 			return NULL;
 	}
 
 	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
 		unsigned int idx = arm_smmu_cdtab_l1_idx(ssid);
-		struct arm_smmu_l1_ctx_desc *l1_desc;
+		struct arm_smmu_cdtab_l2 **l2ptr = &cd_table->l2.l2ptrs[idx];
 
-		l1_desc = &cd_table->l1_desc[idx];
-		if (!l1_desc->l2ptr) {
-			struct arm_smmu_cdtab_l1 *dst;
+		if (!*l2ptr) {
 			dma_addr_t l2ptr_dma;
-			size_t size;
 
-			size = CTXDESC_L2_ENTRIES * sizeof(struct arm_smmu_cd);
-			l1_desc->l2ptr = dma_alloc_coherent(smmu->dev, size,
-							    &l2ptr_dma,
-							    GFP_KERNEL);
-			if (!l1_desc->l2ptr)
+			*l2ptr = dma_alloc_coherent(smmu->dev, sizeof(**l2ptr),
+						    &l2ptr_dma, GFP_KERNEL);
+			if (!*l2ptr)
 				return NULL;
 
-			dst = &((struct arm_smmu_cdtab_l1 *)cd_table->cdtab)[idx];
-			arm_smmu_write_cd_l1_desc(dst, l2ptr_dma);
+			arm_smmu_write_cd_l1_desc(&cd_table->l2.l1tab[idx],
+						  l2ptr_dma);
 			/* An invalid L1CD can be cached */
 			arm_smmu_sync_cd(master, ssid, false);
 		}
@@ -1586,7 +1581,7 @@ void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
 	struct arm_smmu_cd target = {};
 	struct arm_smmu_cd *cdptr;
 
-	if (!master->cd_table.cdtab)
+	if (!arm_smmu_cdtab_allocated(&master->cd_table))
 		return;
 	cdptr = arm_smmu_get_cd_ptr(master, ssid);
 	if (WARN_ON(!cdptr))
@@ -1608,70 +1603,70 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
 	    max_contexts <= CTXDESC_L2_ENTRIES) {
 		cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
-		cd_table->num_l1_ents = max_contexts;
+		cd_table->linear.num_ents = max_contexts;
 
-		l1size = max_contexts * sizeof(struct arm_smmu_cd);
+		l1size = max_contexts * sizeof(struct arm_smmu_cd);
+		cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size,
+							    &cd_table->cdtab_dma,
+							    GFP_KERNEL);
+		if (!cd_table->linear.table)
+			return -ENOMEM;
 	} else {
 		cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
-		cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
-						     CTXDESC_L2_ENTRIES);
+		cd_table->l2.num_l1_ents =
+			DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES);
 
-		cd_table->l1_desc = kcalloc(cd_table->num_l1_ents,
-					    sizeof(*cd_table->l1_desc),
-					    GFP_KERNEL);
-		if (!cd_table->l1_desc)
+		cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents,
+					      sizeof(*cd_table->l2.l2ptrs),
+					      GFP_KERNEL);
+		if (!cd_table->l2.l2ptrs)
 			return -ENOMEM;
 
-		l1size = cd_table->num_l1_ents * sizeof(struct arm_smmu_cdtab_l1);
-	}
-
-	cd_table->cdtab = dma_alloc_coherent(smmu->dev, l1size,
-					     &cd_table->cdtab_dma, GFP_KERNEL);
-	if (!cd_table->cdtab) {
-		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
-		ret = -ENOMEM;
-		goto err_free_l1;
+		l1size = cd_table->l2.num_l1_ents * sizeof(struct arm_smmu_cdtab_l1);
+		cd_table->l2.l1tab = dma_alloc_coherent(smmu->dev, l1size,
+							&cd_table->cdtab_dma,
+							GFP_KERNEL);
+		if (!cd_table->l2.l1tab) {
+			ret = -ENOMEM;
+			goto err_free_l2ptrs;
+		}
 	}
-
 	return 0;
 
-err_free_l1:
-	if (cd_table->l1_desc) {
-		kfree(cd_table->l1_desc);
-		cd_table->l1_desc = NULL;
-	}
+err_free_l2ptrs:
+	kfree(cd_table->l2.l2ptrs);
+	cd_table->l2.l2ptrs = NULL;
 	return ret;
 }
 
 static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
 {
 	int i;
-	size_t l1size;
 	struct arm_smmu_device *smmu = master->smmu;
 	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-	if (cd_table->l1_desc) {
-		for (i = 0; i < cd_table->num_l1_ents; i++) {
-			dma_addr_t dma_handle;
-
-			if (!cd_table->l1_desc[i].l2ptr)
+	if (cd_table->s1fmt != STRTAB_STE_0_S1FMT_LINEAR) {
+		for (i = 0; i < cd_table->l2.num_l1_ents; i++) {
+			if (!cd_table->l2.l2ptrs[i])
 				continue;
 
-			dma_handle = arm_smmu_cd_l1_get_desc(&(
-				(struct arm_smmu_cdtab_l1 *)cd_table->cdtab)[i]);
 			dma_free_coherent(smmu->dev,
-					  sizeof(*cd_table->l1_desc[i].l2ptr),
-					  cd_table->l1_desc[i].l2ptr,
-					  dma_handle);
+					  sizeof(*cd_table->l2.l2ptrs[i]),
+					  cd_table->l2.l2ptrs[i],
+					  arm_smmu_cd_l1_get_desc(&cd_table->l2.l1tab[i]));
 		}
-		kfree(cd_table->l1_desc);
+		kfree(cd_table->l2.l2ptrs);
 
-		l1size = cd_table->num_l1_ents * sizeof(struct arm_smmu_cdtab_l1);
+		dma_free_coherent(smmu->dev,
+				  cd_table->l2.num_l1_ents *
					  sizeof(struct arm_smmu_cdtab_l1),
+				  cd_table->l2.l1tab, cd_table->cdtab_dma);
 	} else {
-		l1size = cd_table->num_l1_ents * sizeof(struct arm_smmu_cd);
+		dma_free_coherent(smmu->dev,
+				  cd_table->linear.num_ents *
+					  sizeof(struct arm_smmu_cd),
+				  cd_table->linear.table, cd_table->cdtab_dma);
 	}
-
-	dma_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma);
 }
 
 /* Stream table manipulation functions */
@@ -3573,7 +3568,7 @@ static void arm_smmu_release_device(struct device *dev)
 	arm_smmu_disable_pasid(master);
 	arm_smmu_remove_master(master);
-	if (master->cd_table.cdtab)
+	if (arm_smmu_cdtab_allocated(&master->cd_table))
 		arm_smmu_free_cd_tables(master);
 	kfree(master);
 }
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index fdd9f4555ec2..45cad8ba28c4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -669,15 +669,19 @@ struct arm_smmu_ctx_desc {
 	u16 asid;
 };
 
-struct arm_smmu_l1_ctx_desc {
-	struct arm_smmu_cdtab_l2 *l2ptr;
-};
-
 struct arm_smmu_ctx_desc_cfg {
-	__le64 *cdtab;
+	union {
+		struct {
+			struct arm_smmu_cd *table;
+			unsigned int num_ents;
+		} linear;
+		struct {
+			struct arm_smmu_cdtab_l1 *l1tab;
+			struct arm_smmu_cdtab_l2 **l2ptrs;
+			unsigned int num_l1_ents;
+		} l2;
+	};
 	dma_addr_t cdtab_dma;
-	struct arm_smmu_l1_ctx_desc *l1_desc;
-	unsigned int num_l1_ents;
 	unsigned int used_ssids;
 	u8 in_ste;
 	u8 s1fmt;
@@ -685,6 +689,12 @@ struct arm_smmu_ctx_desc_cfg {
 	u8 s1cdmax;
 };
 
+static inline bool
+arm_smmu_cdtab_allocated(struct arm_smmu_ctx_desc_cfg *cfg)
+{
+	return cfg->linear.table || cfg->l2.l1tab;
+}
+
 /* True if the cd table has SSIDS > 0 in use. */
 static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
 {