euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8RJAT?from=project-issue
CVE: NA
----------------------------------
arm64 cannot support the virtualization pass-through feature when the
3408iMR/3416iMR RAID cards are in use. To solve this problem, we make
two changes:
1. Pre-initialize bypass level-2 stream table entries for the matched
   devices when the SMMU uses a 2-level stream ID table.
2. Add an smmu.bypassdev cmdline option that allows the SMMU to bypass
   the streams of specific devices.
Usage:
3408iMR RAID: smmu.bypassdev=0x1000:0x17
3416iMR RAID: smmu.bypassdev=0x1000:0x15
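The option may be given more than once (each occurrence invokes the
__setup() handler, up to MAX_CMDLINE_SMMU_BYPASS_DEV = 16 devices).
For illustration only, not part of the patch: a minimal user-space
sketch of the vendor:device parsing the handler below performs, using
the same sscanf() format; the IDs are the ones from the usage lines
above, and the stdio scaffolding is hypothetical.

  #include <stdio.h>

  int main(void)
  {
  	/* Values a user might pass via smmu.bypassdev= */
  	const char *opts[] = { "0x1000:0x17", "0x1000:0x15" };
  	unsigned short vendor, device;
  	unsigned int i;

  	for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
  		/* Same format the kernel handler uses: hex vendor:device */
  		if (sscanf(opts[i], "%hx:%hx", &vendor, &device) != 2) {
  			fprintf(stderr, "bad option: %s\n", opts[i]);
  			continue;
  		}
  		printf("bypass PCI device %04hx:%04hx\n", vendor, device);
  	}
  	return 0;
  }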
Signed-off-by: zhangnaichuan <zhangnaichuan@huawei.com>
Signed-off-by: luochunsheng <luochunsheng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/iommu/Kconfig                       |   9 ++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 114 +++++++++++++++++++-
 2 files changed, 120 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 2ecee0122..802c8d915 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -501,4 +501,13 @@ config SPRD_IOMMU
 	  Say Y here if you want to use the multimedia devices listed above.
 
+config SMMU_BYPASS_DEV
+	bool "SMMU bypass streams for some specific devices"
+	depends on ARM_SMMU_V3=y
+	help
+	  According to the smmu.bypassdev cmdline, the SMMU performs
+	  attribute transformation only, with no address translation.
+	  E.g. allow the iMR3408/3416 RAID cards to bypass the SMMU at
+	  the default DMA domain, to support pass-through of other devices.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5055a6664..c92fdce5a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -41,6 +41,59 @@ module_param(disable_msipolling, bool, 0444);
 MODULE_PARM_DESC(disable_msipolling,
 	"Disable MSI-based polling for CMD_SYNC completion.");
 
+#ifdef CONFIG_SMMU_BYPASS_DEV
+struct smmu_bypass_device {
+	unsigned short vendor;
+	unsigned short device;
+};
+#define MAX_CMDLINE_SMMU_BYPASS_DEV 16
+
+static struct smmu_bypass_device smmu_bypass_devices[MAX_CMDLINE_SMMU_BYPASS_DEV];
+static int smmu_bypass_devices_num;
+
+static int __init arm_smmu_bypass_dev_setup(char *str)
+{
+	unsigned short vendor;
+	unsigned short device;
+	int ret;
+
+	if (!str)
+		return -EINVAL;
+
+	ret = sscanf(str, "%hx:%hx", &vendor, &device);
+	if (ret != 2)
+		return -EINVAL;
+
+	if (smmu_bypass_devices_num >= MAX_CMDLINE_SMMU_BYPASS_DEV)
+		return -ERANGE;
+
+	smmu_bypass_devices[smmu_bypass_devices_num].vendor = vendor;
+	smmu_bypass_devices[smmu_bypass_devices_num].device = device;
+	smmu_bypass_devices_num++;
+
+	return 0;
+}
+
+__setup("smmu.bypassdev=", arm_smmu_bypass_dev_setup);
+
+static int arm_smmu_bypass_dev_domain_type(struct device *dev)
+{
+	int i;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	for (i = 0; i < smmu_bypass_devices_num; i++) {
+		if ((smmu_bypass_devices[i].vendor == pdev->vendor) &&
+		    (smmu_bypass_devices[i].device == pdev->device)) {
+			dev_info(dev, "device 0x%hx:0x%hx uses identity mapping.",
+				 pdev->vendor, pdev->device);
+			return IOMMU_DOMAIN_IDENTITY;
+		}
+	}
+
+	return 0;
+}
+#endif
+
 enum arm_smmu_msi_index {
 	EVTQ_MSI_INDEX,
 	GERROR_MSI_INDEX,
@@ -2841,14 +2894,19 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
 static int arm_smmu_def_domain_type(struct device *dev)
 {
+	int ret = 0;
+
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 		if (IS_HISI_PTT_DEVICE(pdev))
 			return IOMMU_DOMAIN_IDENTITY;
+
+#ifdef CONFIG_SMMU_BYPASS_DEV
+		ret = arm_smmu_bypass_dev_domain_type(dev);
+#endif
 	}
 
-	return 0;
+	return ret;
 }
 
 static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
@@ -3006,12 +3064,56 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
 	return 0;
 }
 
+#ifdef CONFIG_SMMU_BYPASS_DEV
+static void arm_smmu_install_bypass_ste_for_dev(struct arm_smmu_device *smmu,
+						u32 sid)
+{
+	u64 val;
+	__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+
+	if (!step)
+		return;
+
+	val = STRTAB_STE_0_V;
+	val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+	step[0] = cpu_to_le64(val);
+	step[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+					 STRTAB_STE_1_SHCFG_INCOMING));
+	step[2] = 0;
+}
+
+static int arm_smmu_prepare_init_l2_strtab(struct device *dev, void *data)
+{
+	u32 sid;
+	int ret;
+	struct pci_dev *pdev;
+	struct arm_smmu_device *smmu = (struct arm_smmu_device *)data;
+
+	if (!arm_smmu_def_domain_type(dev))
+		return 0;
+
+	pdev = to_pci_dev(dev);
+	sid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+	if (!arm_smmu_sid_in_range(smmu, sid))
+		return -ERANGE;
+
+	ret = arm_smmu_init_l2_strtab(smmu, sid);
+	if (ret)
+		return ret;
+
+	arm_smmu_install_bypass_ste_for_dev(smmu, sid);
+
+	return 0;
+}
+#endif
+
 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 {
 	void *strtab;
 	u64 reg;
 	u32 size, l1size;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+	int ret;
 
 	/* Calculate the L1 size, capped to the SIDSIZE. */
 	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
@@ -3040,8 +3142,14 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
 	cfg->strtab_base_cfg = reg;
-
-	return arm_smmu_init_l1_strtab(smmu);
+	ret = arm_smmu_init_l1_strtab(smmu);
+#ifdef CONFIG_SMMU_BYPASS_DEV
+	if (!ret && smmu_bypass_devices_num) {
+		ret = bus_for_each_dev(&pci_bus_type, NULL, (void *)smmu,
+				       arm_smmu_prepare_init_l2_strtab);
+	}
+#endif
+	return ret;
 }
 
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
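
Note (illustration only, not part of the patch): the bypass STE that
arm_smmu_install_bypass_ste_for_dev() writes boils down to two 64-bit
words. Below is a standalone sketch of the encoding, assuming the field
positions from the mainline arm-smmu-v3.h (V at bit 0, CFG at bits 3:1
with 0b100 = bypass, SHCFG at bits 45:44 with 0b01 = use incoming
attributes); the shifted macro values here are hand-expanded stand-ins
for the kernel's FIELD_PREP() calls.

  #include <inttypes.h>
  #include <stdio.h>

  #define STRTAB_STE_0_V			(1ULL << 0)   /* STE is valid */
  #define STRTAB_STE_0_CFG_BYPASS		(4ULL << 1)   /* CFG = 0b100 */
  #define STRTAB_STE_1_SHCFG_INCOMING	(1ULL << 44)  /* SHCFG = 0b01 */

  int main(void)
  {
  	uint64_t ste0 = STRTAB_STE_0_V | STRTAB_STE_0_CFG_BYPASS;
  	uint64_t ste1 = STRTAB_STE_1_SHCFG_INCOMING;

  	/* STE[0] = 0x0000000000000009: valid + bypass config */
  	printf("STE[0] = 0x%016" PRIx64 "\n", ste0);
  	/* STE[1] = 0x0000100000000000: keep incoming shareability */
  	printf("STE[1] = 0x%016" PRIx64 "\n", ste1);
  	return 0;
  }

With this configuration the SMMU performs no address translation for
the stream; STE word 2 is zeroed by the patch, as no context or stage-2
translation descriptors apply in bypass mode.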