Signed-off-by: l00500167 <lijunbin4@huawei.com>
---
 arch/arm64/kvm/mmu.c             |  23 ++++
 arch/arm64/kvm/virtcca_cvm.c     | 178 ++++++++++++++++++++++++++++++-
 drivers/iommu/io-pgtable-arm.c   |  60 +++++++++++
 drivers/iommu/iommu.c            |  11 ++
 drivers/pci/access.c             |  33 ++++++
 drivers/pci/msi/msi.c            |  90 ++++++++++++++--
 drivers/vfio/pci/vfio_pci_core.c |   5 +
 drivers/vfio/pci/vfio_pci_rdwr.c |  61 +++++++++++
 drivers/vfio/vfio_iommu_type1.c  |  17 +++
 drivers/vfio/vfio_main.c         |  18 ++++
 include/linux/iommu.h            |   5 +
 include/linux/pci.h              |   4 +
 include/linux/vfio.h             |   5 +
 include/uapi/linux/vfio.h        |   3 +
 virt/kvm/vfio.c                  | 121 ++++++++++++++++++++-
 virt/kvm/vfio.h                  |  10 ++
 16 files changed, 633 insertions(+), 11 deletions(-)
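
Notes (editor's summary, not part of the commit message): this patch plumbs
HiSilicon VirtCCA confidential VM (cVM) device passthrough through the host
stack. Stage-2 MMIO mappings for a cVM are established through TMI calls
(tmi_mmio_map()/tmi_mmio_unmap()) from the KVM page-fault path and from the
SMMU io-pgtable map/unmap hooks; PCI config-space and MSI-X table accesses
for confidential ("cc") devices are routed through tmi_mmio_read() and
tmi_mmio_write(); VFIO gains a secure type1 container (VFIO_TYPE1v2_S_IOMMU)
plus a VFIO_DEVICE_FLAGS_SECURE device-info flag; and the kvm-vfio device
learns to associate an arm_smmu_domain with the owning cVM.

The sketch below is illustrative only and is not part of the patch: a
minimal userspace probe of the new UAPI bits using standard VFIO ioctls.
The group number and device BDF are placeholders.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	#ifndef VFIO_TYPE1v2_S_IOMMU
	#define VFIO_TYPE1v2_S_IOMMU	12		/* added by this patch */
	#endif
	#ifndef VFIO_DEVICE_FLAGS_SECURE
	#define VFIO_DEVICE_FLAGS_SECURE (1 << 9)	/* added by this patch */
	#endif

	int main(void)
	{
		int container = open("/dev/vfio/vfio", O_RDWR);
		int group = open("/dev/vfio/42", O_RDWR);	/* placeholder group */
		struct vfio_device_info info = { .argsz = sizeof(info) };
		int device;

		if (container < 0 || group < 0)
			return 1;
		if (ioctl(group, VFIO_GROUP_SET_CONTAINER, &container) < 0)
			return 1;

		/* Request the secure (cVM-capable) type1 IOMMU backend. */
		if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_S_IOMMU) != 1 ||
		    ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_S_IOMMU) < 0)
			return 1;

		/* Placeholder BDF; the flag reflects is_cc_dev() for the device. */
		device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:7d:00.1");
		if (device < 0 || ioctl(device, VFIO_DEVICE_GET_INFO, &info) < 0)
			return 1;
		printf("secure: %s\n",
		       (info.flags & VFIO_DEVICE_FLAGS_SECURE) ? "yes" : "no");
		return 0;
	}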
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f3ab2c39f7..48153777cc 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1395,6 +1395,23 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
 	return vma->vm_flags & VM_MTE_ALLOWED;
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+static int kvm_cvm_map_ipa(struct kvm *kvm, phys_addr_t ipa, kvm_pfn_t pfn,
+			   unsigned long map_size, enum kvm_pgtable_prot prot)
+{
+	struct page *dst_page = pfn_to_page(pfn);
+	phys_addr_t dst_phys = page_to_phys(dst_page);
+
+	if (WARN_ON(!(prot & KVM_PGTABLE_PROT_W)))
+		return -EFAULT;
+
+	if (prot & KVM_PGTABLE_PROT_DEVICE)
+		return kvm_cvm_map_ipa_mmio(kvm, ipa, dst_phys, map_size);
+
+	return 0;
+}
+#endif
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -1605,6 +1622,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 				     memcache,
 				     KVM_PGTABLE_WALK_HANDLE_FAULT |
 				     KVM_PGTABLE_WALK_SHARED);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (kvm_is_virtcca_cvm(kvm)) {
+		ret = kvm_cvm_map_ipa(kvm, fault_ipa, pfn, vma_pagesize, prot);
+		WARN_ON(ret);
+	}
+#endif
 
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret) {
diff --git a/arch/arm64/kvm/virtcca_cvm.c b/arch/arm64/kvm/virtcca_cvm.c
index f326dc4394..227f70d01a 100644
--- a/arch/arm64/kvm/virtcca_cvm.c
+++ b/arch/arm64/kvm/virtcca_cvm.c
@@ -4,6 +4,7 @@
  */
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
+#include <linux/vfio.h>
 #include <asm/kvm_tmi.h>
 #include <asm/kvm_pgtable.h>
 #include <asm/kvm_emulate.h>
@@ -12,6 +13,7 @@
 #include <linux/arm-smccc.h>
 #include <kvm/arm_hypercalls.h>
 #include <kvm/arm_psci.h>
+#include "../virt/kvm/vfio.h"
 
 /* Protects access to cvm_vmid_bitmap */
 static DEFINE_SPINLOCK(cvm_vmid_lock);
@@ -155,6 +157,27 @@ static u64 kvm_get_first_binded_numa_set(struct kvm *kvm)
 	return NO_NUMA;
 }
 
+int cvm_arm_smmu_domain_set_kvm(struct iommu_group *group)
+{
+	struct arm_smmu_domain *arm_smmu_domain = NULL;
+	struct iommu_domain *domain;
+	struct kvm *kvm;
+
+	domain = iommu_group_get_domain(group);
+	if (!domain)
+		return -ENXIO;
+
+	arm_smmu_domain = to_smmu_domain(domain);
+	if (arm_smmu_domain->kvm)
+		return 0;
+
+	kvm = arm_smmu_get_kvm(arm_smmu_domain);
+	if (kvm && kvm_is_virtcca_cvm(kvm))
+		arm_smmu_domain->kvm = kvm;
+
+	return 0;
+}
+
 int kvm_arm_create_cvm(struct kvm *kvm)
 {
 	int ret;
@@ -210,10 +233,18 @@ void kvm_destroy_cvm(struct kvm *kvm)
 {
 	struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm;
 	uint32_t cvm_vmid;
+	struct arm_smmu_domain *arm_smmu_domain;
+	struct list_head smmu_domain_group_list;
 
 	if (!cvm)
 		return;
 
+	kvm_get_arm_smmu_domain(kvm, &smmu_domain_group_list);
+	list_for_each_entry(arm_smmu_domain, &smmu_domain_group_list, node) {
+		if (arm_smmu_domain->kvm == kvm)
+			arm_smmu_domain->kvm = NULL;
+	}
+
 	cvm_vmid = cvm->cvm_vmid;
 	kfree(cvm->params);
 	cvm->params = NULL;
@@ -228,6 +259,7 @@ void kvm_destroy_cvm(struct kvm *kvm)
 	if (!tmi_cvm_destroy(cvm->rd))
 		kvm_info("KVM has destroyed cVM: %d\n", cvm->cvm_vmid);
 
+	cvm->is_mapped = false;
 	kfree(cvm);
 	kvm->arch.virtcca_cvm = NULL;
 }
@@ -262,6 +294,133 @@ int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct virtcca_cvm *cvm,
 	return 0;
 }
 
+static int kvm_cvm_map_unmap_ipa_internal(struct kvm *kvm, phys_addr_t ipa_base,
+					  phys_addr_t pa, unsigned long map_size, uint32_t is_map)
+{
+	struct virtcca_cvm *virtcca_cvm = (struct virtcca_cvm *)kvm->arch.virtcca_cvm;
+	phys_addr_t rd = virtcca_cvm->rd;
+	unsigned long ipa = ipa_base;
+	unsigned long phys = pa;
+	unsigned long size;
+	int map_level = 3;
+	int ret = 0;
+
+	for (size = 0; size < map_size; size += PAGE_SIZE) {
+		if (is_map)
+			ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys);
+		else
+			ret = tmi_mmio_unmap(rd, ipa, CVM_TTT_MAX_LEVEL);
+
+		if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) {
+			/* Create missing TTTs and retry */
+			int level_fault = TMI_RETURN_INDEX(ret);
+
+			if (is_map) {
+				ret = kvm_cvm_create_ttt_levels(kvm, virtcca_cvm, ipa, level_fault,
+								CVM_TTT_MAX_LEVEL, NULL);
+				if (ret)
+					goto err;
+				ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys);
+			} else {
+				ret = tmi_mmio_unmap(rd, ipa, level_fault);
+			}
+		}
+
+		WARN_ON(ret);
+		if (ret)
+			goto err;
+
+		if (size + PAGE_SIZE >= map_size)
+			break;
+		ipa += PAGE_SIZE;
+		phys += PAGE_SIZE;
+	}
+
+	return 0;
+
+err:
+	/* Unwind the pages mapped before the failure */
+	while (size > 0) {
+		phys -= PAGE_SIZE;
+		size -= PAGE_SIZE;
+		ipa -= PAGE_SIZE;
+
+		WARN_ON(tmi_data_destroy(rd, ipa, map_level));
+	}
+	return -ENXIO;
+}
+
+int kvm_cvm_map_unmap_ipa_range(struct kvm *kvm, phys_addr_t ipa_base,
+				phys_addr_t pa, unsigned long map_size, uint32_t is_map)
+{
+	return kvm_cvm_map_unmap_ipa_internal(kvm, ipa_base, pa, map_size, is_map);
+}
+
+int kvm_cvm_map_ipa_mmio(struct kvm *kvm, phys_addr_t ipa_base,
+			 phys_addr_t pa, unsigned long map_size)
+{
+	struct virtcca_cvm *virtcca_cvm = (struct virtcca_cvm *)kvm->arch.virtcca_cvm;
+	phys_addr_t rd = virtcca_cvm->rd;
+	unsigned long ipa = ipa_base;
+	unsigned long phys = pa;
+	unsigned long size;
+	int map_level = 3;
+	int ret = 0;
+	gfn_t gfn;
+	kvm_pfn_t pfn;
+
+	if (WARN_ON(!IS_ALIGNED(ipa, map_size)))
+		return -EINVAL;
+
+	for (size = 0; size < map_size; size += PAGE_SIZE) {
+		ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys);
+		if (ret == TMI_ERROR_TTT_CREATED) {
+			/* Already mapped, move on to the next page */
+			ret = 0;
+			goto next_page;
+		}
+		if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) {
+			/* Create missing TTTs and retry */
+			int level_fault = TMI_RETURN_INDEX(ret);
+
+			ret = kvm_cvm_create_ttt_levels(kvm, virtcca_cvm, ipa, level_fault,
+							CVM_TTT_MAX_LEVEL, NULL);
+			if (ret)
+				goto err;
+			ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys);
+		}
+
+		WARN_ON(ret);
+		if (ret)
+			goto err;
+next_page:
+		if (size + PAGE_SIZE >= map_size)
+			break;
+
+		ipa += PAGE_SIZE;
+		gfn = gpa_to_gfn(ipa);
+		pfn = gfn_to_pfn(kvm, gfn);
+		kvm_set_pfn_accessed(pfn);
+		kvm_release_pfn_clean(pfn);
+		phys = (uint64_t)__pfn_to_phys(pfn);
+	}
+
+	return 0;
+
+err:
+	/* Unwind the pages mapped before the failure */
+	while (size > 0) {
+		phys -= PAGE_SIZE;
+		size -= PAGE_SIZE;
+		ipa -= PAGE_SIZE;
+		WARN_ON(tmi_data_destroy(rd, ipa, map_level));
+	}
+	return -ENXIO;
+}
+
 static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct virtcca_cvm *cvm,
 	unsigned long ipa, int level, struct page *src_page, u64 numa_set)
 {
@@ -550,18 +709,35 @@ int kvm_cvm_map_range(struct kvm *kvm)
 		}
 	}
 
+	cvm->is_mapped = true;
 	return ret;
 }
 
 static int kvm_activate_cvm(struct kvm *kvm)
 {
+	int ret;
+	struct arm_smmu_domain *arm_smmu_domain;
+	struct list_head smmu_domain_group_list;
 	struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm;
 
 	if (virtcca_cvm_state(kvm) != CVM_STATE_NEW)
 		return -EINVAL;
 
-	if (kvm_cvm_map_range(kvm))
+	if (!cvm->is_mapped && kvm_cvm_map_range(kvm))
+		return -EFAULT;
+
+	if (kvm_get_arm_smmu_domain(kvm, &smmu_domain_group_list)) {
+		kvm_err("tmi activate cvm: get arm smmu domain failed!\n");
 		return -EFAULT;
+	}
+
+	list_for_each_entry(arm_smmu_domain, &smmu_domain_group_list, node) {
+		if (arm_smmu_domain->secure) {
+			ret = arm_smmu_tmi_dev_attach(arm_smmu_domain, kvm);
+			if (ret)
+				return ret;
+		}
+	}
 
 	if (tmi_cvm_activate(cvm->rd)) {
 		kvm_err("tmi_cvm_activate failed!\n");
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index fb54baed3f..36eec71028 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -22,6 +22,16 @@
 
 #include "io-pgtable-arm.h"
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#include <asm/kvm_tmi.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_tmm.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <asm/kvm.h>
+#include "../virt/kvm/vfio.h"
+#endif
+
 #define ARM_LPAE_MAX_ADDR_BITS		52
 #define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
 #define ARM_LPAE_MAX_LEVELS		4
@@ -498,6 +508,41 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 	return pte;
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+static int arm_lpae_cvm_map_unmap_pages(struct arm_lpae_io_pgtable *data, unsigned long iova,
+					phys_addr_t paddr, size_t size, uint32_t is_map)
+{
+	struct arm_smmu_domain *smmu_domain = (struct arm_smmu_domain *)data->iop.cookie;
+	int ret = 0;
+	struct kvm *kvm;
+	u64 loader_start;
+	u64 ram_size;
+	/* The cVM RAM range only needs to be mapped into the TMM once */
+	static bool smmu_mapped;
+
+	kvm = smmu_domain->kvm;
+	if (kvm && kvm_is_virtcca_cvm(kvm) && virtcca_cvm_state(kvm) != CVM_STATE_DYING) {
+		struct virtcca_cvm *virtcca_cvm = (struct virtcca_cvm *)kvm->arch.virtcca_cvm;
+
+		loader_start = virtcca_cvm->loader_start;
+		ram_size = virtcca_cvm->ram_size;
+		if (!smmu_mapped && iova >= loader_start &&
+		    iova < loader_start + ram_size &&
+		    is_map && !virtcca_cvm->is_mapped) {
+			ret = kvm_cvm_map_range(kvm);
+			if (!ret)
+				smmu_mapped = true;
+			WARN_ON(ret);
+		} else if (iova < loader_start) {
+			/* Relocate the trapped MSI doorbell IOVA for the cVM */
+			if (iova == CVM_MSI_ORIG_IOVA)
+				iova += CVM_MSI_IOVA_OFFSET;
+			ret = kvm_cvm_map_unmap_ipa_range(kvm, iova, paddr, size, is_map);
+			WARN_ON(ret);
+		}
+	}
+	return ret;
+}
+#endif
+
 static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
 			      int iommu_prot, gfp_t gfp, size_t *mapped)
@@ -522,6 +567,11 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 		return 0;
 
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
+
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	ret = arm_lpae_cvm_map_unmap_pages(data, iova, paddr, pgsize * pgcount, true);
+	if (ret)
+		return ret;
+#endif
+
 	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
 			     ptep, gfp, mapped);
 	/*
@@ -708,6 +758,16 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(iaext))
 		return 0;
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	{
+		int ret = arm_lpae_cvm_map_unmap_pages(data, iova, 0,
+						       pgsize * pgcount, false);
+
+		if (WARN_ON(ret))
+			pr_err("%s: failed to unmap pages, iova %lx, size %zx\n",
+			       __func__, iova, pgsize * pgcount);
+	}
+#endif
+
 	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
 				data->start_level, ptep);
 }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 28f63ad432..07a084c9d9 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -592,6 +592,17 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
 	return ret;
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+struct iommu_domain *iommu_group_get_domain(struct iommu_group *iommu_group)
+{
+	if (iommu_group)
+		return iommu_group->domain;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_domain);
+#endif
+
 int iommu_probe_device(struct device *dev)
 {
 	const struct iommu_ops *ops;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 6554a2e89d..813198abfc 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -7,6 +7,10 @@
 
 #include "pci.h"
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
 /*
  * This interrupt-safe spinlock protects all accesses to PCI
  * configuration space.
@@ -86,6 +90,19 @@ int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
 	if (!addr)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (is_cc_dev((bus->number << 8) | devfn)) {
+		if (size == 1)
+			*val = tmi_mmio_read(va_to_pa(addr), 8, ((bus->number << 8) | devfn));
+		else if (size == 2)
+			*val = tmi_mmio_read(va_to_pa(addr), 16, ((bus->number << 8) | devfn));
+		else
+			*val = tmi_mmio_read(va_to_pa(addr), 32, ((bus->number << 8) | devfn));
+
+		return PCIBIOS_SUCCESSFUL;
+	}
+#endif
+
 	if (size == 1)
 		*val = readb(addr);
 	else if (size == 2)
@@ -106,6 +123,22 @@ int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
 	if (!addr)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (is_cc_dev((bus->number << 8) | devfn)) {
+		if (size == 1)
+			WARN_ON(tmi_mmio_write(va_to_pa(addr), val,
+					       8, ((bus->number << 8) | devfn)));
+		else if (size == 2)
+			WARN_ON(tmi_mmio_write(va_to_pa(addr), val,
+					       16, ((bus->number << 8) | devfn)));
+		else
+			WARN_ON(tmi_mmio_write(va_to_pa(addr), val,
+					       32, ((bus->number << 8) | devfn)));
+
+		return PCIBIOS_SUCCESSFUL;
+	}
+#endif
+
 	if (size == 1)
 		writeb(val, addr);
 	else if (size == 2)
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 161c3ac171..571b2d0890 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -13,6 +13,11 @@
 #include "../pci.h"
 #include "msi.h"
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#include <asm/kvm_tmm.h>
+#include <asm/kvm_tmi.h>
+#endif
+
 int pci_msi_enable = 1;
 int pci_msi_ignore_mask;
 
@@ -159,9 +164,23 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 		if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual))
 			return;
 
-		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
-		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
-		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+		u64 pbase = va_to_pa(base);
+
+		if (dev != NULL && is_cc_dev(pci_dev_id(dev))) {
+			msg->address_lo = tmi_mmio_read(pbase + PCI_MSIX_ENTRY_LOWER_ADDR,
+							32, pci_dev_id(dev));
+			msg->address_hi = tmi_mmio_read(pbase + PCI_MSIX_ENTRY_UPPER_ADDR,
+							32, pci_dev_id(dev));
+			msg->data = tmi_mmio_read(pbase + PCI_MSIX_ENTRY_DATA, 32, pci_dev_id(dev));
+		} else {
+#endif
+			msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+			msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+			msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+		}
+#endif
 	} else {
 		int pos = dev->msi_cap;
 		u16 data;
@@ -221,15 +240,41 @@ static inline void pci_write_msg_msix(struct msi_desc *desc, struct msi_msg *msg)
 	if (unmasked)
 		pci_msix_write_vector_ctrl(desc, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT);
 
-	writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
-	writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
-	writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	u64 pbase = va_to_pa(base);
+
+	struct pci_dev *pdev = (desc->dev != NULL &&
+				dev_is_pci(desc->dev)) ? to_pci_dev(desc->dev) : NULL;
+
+	if (pdev != NULL && is_cc_dev(pci_dev_id(pdev))) {
+		u64 addr = (u64)msg->address_lo | ((u64)msg->address_hi << 32);
+
+		addr += CVM_MSI_IOVA_OFFSET;
+		tmi_mmio_write(pbase + PCI_MSIX_ENTRY_LOWER_ADDR,
+			       lower_32_bits(addr), 32, pci_dev_id(pdev));
+		tmi_mmio_write(pbase + PCI_MSIX_ENTRY_UPPER_ADDR,
+			       upper_32_bits(addr), 32, pci_dev_id(pdev));
+		tmi_mmio_write(pbase + PCI_MSIX_ENTRY_DATA,
+			       msg->data, 32, pci_dev_id(pdev));
+	} else {
+#endif
+		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	}
+#endif
 
 	if (unmasked)
 		pci_msix_write_vector_ctrl(desc, ctrl);
 
 	/* Ensure that the writes are visible in the device */
-	readl(base + PCI_MSIX_ENTRY_DATA);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (pdev != NULL && is_cc_dev(pci_dev_id(pdev)))
+		tmi_mmio_read(pbase + PCI_MSIX_ENTRY_DATA, 32, pci_dev_id(pdev));
+	else
+#endif
+		readl(base + PCI_MSIX_ENTRY_DATA);
 }
 
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
@@ -638,7 +683,12 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
 
 	if (desc->pci.msi_attrib.can_mask) {
 		void __iomem *addr = pci_msix_desc_addr(desc);
-
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+		if (is_cc_dev(pci_dev_id(dev)))
+			desc->pci.msix_ctrl = tmi_mmio_read(va_to_pa(addr + PCI_MSIX_ENTRY_VECTOR_CTRL),
+							    32, pci_dev_id(dev));
+		else
+#endif
 		desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 	}
 }
@@ -690,6 +740,23 @@ static void msix_mask_all(void __iomem *base, int tsize)
 		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+static void msix_mask_all_cc(void __iomem *base, int tsize, u64 dev_num)
+{
+	u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	int i;
+	u64 pbase = va_to_pa(base);
+
+	if (pci_msi_ignore_mask)
+		return;
+
+	/* Advance the physical address, one MSI-X table entry per step */
+	for (i = 0; i < tsize; i++, pbase += PCI_MSIX_ENTRY_SIZE)
+		tmi_mmio_write(pbase + PCI_MSIX_ENTRY_VECTOR_CTRL,
+			       ctrl, 32, dev_num);
+}
+#endif
+
 static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
 				 int nvec, struct irq_affinity *affd)
 {
@@ -776,7 +843,12 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 	 * which takes the MSI-X mask bits into account even
 	 * when MSI-X is disabled, which prevents MSI delivery.
 	 */
-	msix_mask_all(dev->msix_base, tsize);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (is_cc_dev(pci_dev_id(dev)))
+		msix_mask_all_cc(dev->msix_base, tsize, pci_dev_id(dev));
+	else
+#endif
+		msix_mask_all(dev->msix_base, tsize);
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
 	pcibios_free_irq(dev);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1929103ee5..0f3976692b 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -975,6 +975,11 @@ static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
 	if (vdev->reset_works)
 		info.flags |= VFIO_DEVICE_FLAGS_RESET;
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (is_cc_dev(pci_dev_id(vdev->pdev)))
+		info.flags |= VFIO_DEVICE_FLAGS_SECURE;
+#endif
+
 	info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
 	info.num_irqs = VFIO_PCI_NUM_IRQS;
 
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index e27de61ac9..e4deb868dc 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -18,6 +18,9 @@
 #include <linux/vgaarb.h>
 
 #include "vfio_pci_priv.h"
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#include <asm/kvm_tmi.h>
+#endif
 
 #ifdef __LITTLE_ENDIAN
 #define vfio_ioread64	ioread64
@@ -37,6 +40,34 @@
 #define vfio_ioread8	ioread8
 #define vfio_iowrite8	iowrite8
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#define VFIO_IOWRITE(size) \
+static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev,	\
+			bool test_mem, u##size val, void __iomem *io)	\
+{									\
+	struct pci_dev *pdev = vdev->pdev;				\
+	bool cc_dev = pdev == NULL ? false : is_cc_dev(pci_dev_id(pdev)); \
+									\
+	if (test_mem) {							\
+		down_read(&vdev->memory_lock);				\
+		if (!__vfio_pci_memory_enabled(vdev)) {			\
+			up_read(&vdev->memory_lock);			\
+			return -EIO;					\
+		}							\
+	}								\
+									\
+	if (cc_dev) {							\
+		WARN_ON(tmi_mmio_write(va_to_pa(io), val, size,		\
+				       pci_dev_id(pdev)));		\
+	} else {							\
+		vfio_iowrite##size(val, io);				\
+	}								\
+									\
+	if (test_mem)							\
+		up_read(&vdev->memory_lock);				\
+									\
+	return 0;							\
+}
+#else /* !CONFIG_HISI_VIRTCCA_HOST */
 #define VFIO_IOWRITE(size) \
 static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev,	\
 			bool test_mem, u##size val, void __iomem *io)	\
@@ -56,6 +87,7 @@ static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev,	\
 									\
 	return 0;							\
 }
+#endif /* CONFIG_HISI_VIRTCCA_HOST */
 
 VFIO_IOWRITE(8)
 VFIO_IOWRITE(16)
@@ -64,6 +96,34 @@ VFIO_IOWRITE(32)
 VFIO_IOWRITE(64)
 #endif
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#define VFIO_IOREAD(size) \
+static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev,	\
+			bool test_mem, u##size *val, void __iomem *io)	\
+{									\
+	struct pci_dev *pdev = vdev->pdev;				\
+	bool cc_dev = pdev == NULL ? false : is_cc_dev(pci_dev_id(pdev)); \
+									\
+	if (test_mem) {							\
+		down_read(&vdev->memory_lock);				\
+		if (!__vfio_pci_memory_enabled(vdev)) {			\
+			up_read(&vdev->memory_lock);			\
+			return -EIO;					\
+		}							\
+	}								\
+									\
+	if (cc_dev) {							\
+		*val = tmi_mmio_read(va_to_pa(io), size,		\
+				     pci_dev_id(pdev));			\
+	} else {							\
+		*val = vfio_ioread##size(io);				\
+	}								\
+									\
+	if (test_mem)							\
+		up_read(&vdev->memory_lock);				\
+									\
+	return 0;							\
+}
+#else /* !CONFIG_HISI_VIRTCCA_HOST */
 #define VFIO_IOREAD(size) \
 static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev,	\
 			bool test_mem, u##size *val, void __iomem *io)	\
@@ -83,6 +143,7 @@ static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev,	\
 									\
 	return 0;							\
 }
+#endif /* CONFIG_HISI_VIRTCCA_HOST */
 
 VFIO_IOREAD(8)
 VFIO_IOREAD(16)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9c4adf11db..021f054088 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -77,6 +77,9 @@ struct vfio_iommu {
 	bool			dirty_page_tracking;
 	struct list_head	emulated_iommu_groups;
 	bool			dirty_log_get_no_clear;
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	bool			secure;
+#endif
 };
 
 struct vfio_domain {
@@ -2454,6 +2457,11 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		goto out_domain;
 	}
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (iommu->secure)
+		domain->domain->owner->iommu_enable_secure(domain->domain);
+#endif
+
 	ret = iommu_attach_group(domain->domain, group->iommu_group);
 	if (ret)
 		goto out_domain;
@@ -2807,6 +2815,12 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 	case VFIO_TYPE1v2_IOMMU:
 		iommu->v2 = true;
 		break;
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	case VFIO_TYPE1v2_S_IOMMU:
+		iommu->v2 = true;
+		iommu->secure = true;
+		break;
+#endif
 	default:
 		kfree(iommu);
 		return ERR_PTR(-EINVAL);
@@ -2898,6 +2912,9 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
 	switch (arg) {
 	case VFIO_TYPE1_IOMMU:
 	case VFIO_TYPE1v2_IOMMU:
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	case VFIO_TYPE1v2_S_IOMMU:
+#endif
 	case VFIO_TYPE1_NESTING_IOMMU:
 	case VFIO_UNMAP_ALL:
 		return 1;
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index a96d97da36..0d45ed89ab 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -318,6 +318,24 @@ static int __vfio_register_dev(struct vfio_device *device,
 	return ret;
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+bool vfio_iommu_group(struct vfio_group *vfio_group, struct iommu_group *iommu_group)
+{
+	return vfio_group->iommu_group == iommu_group;
+}
+EXPORT_SYMBOL_GPL(vfio_iommu_group);
+
+struct iommu_group *vfio_get_iommu_group(struct vfio_group *vfio_group)
+{
+	if (vfio_group)
+		return vfio_group->iommu_group;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(vfio_get_iommu_group);
+#endif
+
 int vfio_register_group_dev(struct vfio_device *device)
 {
 	return __vfio_register_dev(device, VFIO_IOMMU);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 16791cf835..2dac030492 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1152,6 +1152,11 @@ static inline void *dev_iommu_priv_get(struct device *dev)
 void dev_iommu_priv_set(struct device *dev, void *priv);
 
 extern struct mutex iommu_probe_device_lock;
+
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+struct iommu_domain *iommu_group_get_domain(struct iommu_group *iommu_group);
+#endif
+
 int iommu_probe_device(struct device *dev);
 
 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8838b58a6f..a2a532703c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2734,4 +2734,8 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
 	WARN_ONCE(condition, "%s %s: " fmt, \
 		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+bool is_cc_dev(u32 sid);
+#endif
+
 #endif /* LINUX_PCI_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 5ac5f182ce..d231eb5bf7 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -123,6 +123,11 @@ struct vfio_device_ops {
 			     void __user *arg, size_t argsz);
 };
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+extern bool vfio_iommu_group(struct vfio_group *vfio_group, struct iommu_group *iommu_group);
+extern struct iommu_group *vfio_get_iommu_group(struct vfio_group *vfio_group);
+#endif
+
 #if IS_ENABLED(CONFIG_IOMMUFD)
 struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev);
 int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx);
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 2b78d03c0c..c95f03258a 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -25,6 +25,8 @@
 #define VFIO_TYPE1_IOMMU		1
 #define VFIO_SPAPR_TCE_IOMMU		2
 #define VFIO_TYPE1v2_IOMMU		3
+#define VFIO_TYPE1v2_S_IOMMU		12
+
 /*
  * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping). This
  * capability is subject to change as groups are added or removed.
@@ -224,6 +226,7 @@ struct vfio_device_info {
 #define VFIO_DEVICE_FLAGS_FSL_MC	(1 << 6)	/* vfio-fsl-mc device */
 #define VFIO_DEVICE_FLAGS_CAPS		(1 << 7)	/* Info supports caps */
 #define VFIO_DEVICE_FLAGS_CDX		(1 << 8)	/* vfio-cdx device */
+#define VFIO_DEVICE_FLAGS_SECURE	(1 << 9)	/* secure (cVM) device */
 	__u32	num_regions;	/* Max region index + 1 */
 	__u32	num_irqs;	/* Max IRQ index + 1 */
 	__u32	cap_offset;	/* Offset within info struct of first cap */
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index ca24ce1209..4b607ae91d 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -17,6 +17,10 @@
 #include <linux/vfio.h>
 #include "vfio.h"
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#include <asm/kvm_emulate.h>
+#endif
+
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 #include <asm/kvm_ppc.h>
 #endif
@@ -80,7 +84,7 @@ static bool kvm_vfio_file_is_valid(struct file *file)
 	return ret;
 }
 
-#ifdef CONFIG_SPAPR_TCE_IOMMU
+#if defined(CONFIG_SPAPR_TCE_IOMMU) || defined(CONFIG_HISI_VIRTCCA_HOST)
 static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
 {
 	struct iommu_group *(*fn)(struct file *file);
@@ -96,7 +100,9 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
 
 	return ret;
 }
+#endif
 
+#ifdef CONFIG_SPAPR_TCE_IOMMU
 static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
 					     struct kvm_vfio_file *kvf)
 {
@@ -142,6 +148,9 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
 
 static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
 {
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	struct iommu_group *iommu_group;
+#endif
 	struct kvm_vfio *kv = dev->private;
 	struct kvm_vfio_file *kvf;
 	struct file *filp;
@@ -179,6 +188,18 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
 	kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
 	kvm_vfio_update_coherency(dev);
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	iommu_group = kvm_vfio_file_iommu_group(filp);
+	if (!iommu_group) {
+		ret = -ENXIO;
+		goto out_unlock;
+	}
+
+	if (cvm_arm_smmu_domain_set_kvm(iommu_group)) {
+		ret = -ENXIO;
+		goto out_unlock;
+	}
+#endif
+
 out_unlock:
 	mutex_unlock(&kv->lock);
 out_fput:
@@ -392,3 +413,101 @@ void kvm_vfio_ops_exit(void)
 {
 	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
 }
+
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+struct kvm *arm_smmu_get_kvm(struct arm_smmu_domain *domain)
+{
+	int ret = -1;
+	struct kvm *kvm;
+	struct kvm_device *dev;
+	struct kvm_vfio *kv;
+	struct kvm_vfio_file *kvf;
+	struct iommu_group *iommu_group = NULL;
+	unsigned long flags;
+	struct arm_smmu_master *master;
+
+	/* Grab the iommu_group of the first master while holding the lock */
+	spin_lock_irqsave(&domain->devices_lock, flags);
+	list_for_each_entry(master, &domain->devices, domain_head) {
+		if (master->num_streams > 0) {
+			iommu_group = master->dev->iommu_group;
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&domain->devices_lock, flags);
+	if (ret)
+		return NULL;
+
+	/* Find the VM whose kvm-vfio device holds a file for this group */
+	ret = -1;
+	mutex_lock(&kvm_lock);
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		mutex_lock(&kvm->lock);
+		list_for_each_entry(dev, &kvm->devices, vm_node) {
+			if (dev->ops && strcmp(dev->ops->name, "kvm-vfio") == 0) {
+				kv = (struct kvm_vfio *)dev->private;
+				mutex_lock(&kv->lock);
+				list_for_each_entry(kvf, &kv->file_list, node) {
+					if (kvm_vfio_file_iommu_group(kvf->file) == iommu_group) {
+						ret = 0;
+						break;
+					}
+				}
+				mutex_unlock(&kv->lock);
+				if (!ret)
+					break;
+			}
+		}
+		mutex_unlock(&kvm->lock);
+		if (!ret)
+			break;
+	}
+	mutex_unlock(&kvm_lock);
+
+	if (ret)
+		return NULL;
+	return kvm;
+}
+
+static void find_arm_smmu_domain(struct kvm_vfio_file *kvf,
+				 struct list_head *smmu_domain_group_list)
+{
+	struct iommu_group *iommu_group;
+	struct iommu_domain *domain;
+	struct arm_smmu_domain *arm_smmu_domain;
+	struct arm_smmu_domain *arm_smmu_domain_node;
+
+	iommu_group = kvm_vfio_file_iommu_group(kvf->file);
+	if (!iommu_group)
+		return;
+
+	domain = iommu_group_get_domain(iommu_group);
+	if (!domain)
+		return;
+
+	arm_smmu_domain = to_smmu_domain(domain);
+	list_for_each_entry(arm_smmu_domain_node, smmu_domain_group_list, node) {
+		if (arm_smmu_domain_node == arm_smmu_domain)
+			return;
+	}
+	list_add_tail(&arm_smmu_domain->node, smmu_domain_group_list);
+}
+
+int kvm_get_arm_smmu_domain(struct kvm *kvm, struct list_head *smmu_domain_group_list)
+{
+	struct kvm_device *dev;
+	struct kvm_vfio *kv;
+	struct kvm_vfio_file *kvf;
+
+	INIT_LIST_HEAD(smmu_domain_group_list);
+
+	list_for_each_entry(dev, &kvm->devices, vm_node) {
+		if (dev->ops && strcmp(dev->ops->name, "kvm-vfio") == 0) {
+			kv = (struct kvm_vfio *)dev->private;
+			mutex_lock(&kv->lock);
+			list_for_each_entry(kvf, &kv->file_list, node)
+				find_arm_smmu_domain(kvf, smmu_domain_group_list);
+			mutex_unlock(&kv->lock);
+		}
+	}
+
+	return 0;
+}
+#endif
 
diff --git a/virt/kvm/vfio.h b/virt/kvm/vfio.h
index e130a4a035..43c303bcf6 100644
--- a/virt/kvm/vfio.h
+++ b/virt/kvm/vfio.h
@@ -2,6 +2,12 @@
 #ifndef __KVM_VFIO_H
 #define __KVM_VFIO_H
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+#include <linux/kvm_host.h>
+#include "../drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h"
+#include "../drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h"
+#endif
+
 #ifdef CONFIG_KVM_VFIO
 int kvm_vfio_ops_init(void);
 void kvm_vfio_ops_exit(void);
@@ -15,4 +21,8 @@ static inline void kvm_vfio_ops_exit(void)
 }
 #endif
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+struct kvm *arm_smmu_get_kvm(struct arm_smmu_domain *domain);
+int kvm_get_arm_smmu_domain(struct kvm *kvm, struct list_head *smmu_domain_group_list);
+#endif
 #endif