From: Kunkun Jiang <jiangkunkun@huawei.com>
virt inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I61SPO
CVE: NA
--------------------------------
This reverts commit 15700dc0010f823a62c8d77f693ce9ad121f75c6, which added
the nested-stage MSI cookie (IOMMU_DMA_NESTED_MSI_COOKIE) and the
iommu_dma_bind_guest_msi()/iommu_dma_unbind_guest_msi() helpers to
dma-iommu.
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/iommu/dma-iommu.c | 180 +-------------------------------------
 include/linux/dma-iommu.h |  16 ----
 2 files changed, 4 insertions(+), 192 deletions(-)
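Note for reviewers (not part of the commit log): the declarations removed
from include/linux/dma-iommu.h below were the only public entry points of
the nested-stage MSI binding. As a rough sketch of what callers lose with
this revert, assuming a hypothetical nested-stage domain setup path (the
example_* function names below are invented for illustration and are not
from this patch):

#include <linux/dma-iommu.h>
#include <linux/iommu.h>

/*
 * Illustrative sketch only: a made-up caller that registers and later
 * drops a guest MSI doorbell binding on a nested-stage domain, using the
 * interface this revert removes. The removed helper internally aligns
 * giova/gpa down to the stage-1 granule before recording the binding.
 */
static int example_bind_guest_doorbell(struct iommu_domain *domain,
				       dma_addr_t giova, phys_addr_t gpa,
				       size_t s1_granule)
{
	/* Record the gIOVA -> gPA stage-1 binding for later stage-2 use. */
	return iommu_dma_bind_guest_msi(domain, giova, gpa, s1_granule);
}

static void example_unbind_guest_doorbell(struct iommu_domain *domain,
					  dma_addr_t giova)
{
	/* Drop the binding and any stage-2 mapping created from it. */
	iommu_dma_unbind_guest_msi(domain, giova);
}

With this revert applied, such callers no longer build; the only remaining
MSI cookie path is iommu_get_msi_cookie() with a caller-reserved IOVA
region.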
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 50b3e3a72a00..d1539b7399a9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -27,15 +27,12 @@
 struct iommu_dma_msi_page {
 	struct list_head	list;
 	dma_addr_t		iova;
-	dma_addr_t		gpa;
 	phys_addr_t		phys;
-	size_t			s1_granule;
 };
 
 enum iommu_dma_cookie_type {
 	IOMMU_DMA_IOVA_COOKIE,
 	IOMMU_DMA_MSI_COOKIE,
-	IOMMU_DMA_NESTED_MSI_COOKIE,
 };
 
 struct iommu_dma_cookie {
@@ -47,8 +44,6 @@ struct iommu_dma_cookie {
 		dma_addr_t		msi_iova;
 	};
 	struct list_head		msi_page_list;
-	/* used in nested mode only */
-	spinlock_t			msi_lock;
 
 	/* Domain for flush queue callback; NULL if flush queue not in use */
 	struct iommu_domain		*fq_domain;
@@ -67,7 +62,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
 
 	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
 	if (cookie) {
-		spin_lock_init(&cookie->msi_lock);
 		INIT_LIST_HEAD(&cookie->msi_page_list);
 		cookie->type = type;
 	}
@@ -101,17 +95,14 @@ EXPORT_SYMBOL(iommu_get_dma_cookie);
  *
  * Users who manage their own IOVA allocation and do not want DMA API support,
  * but would still like to take advantage of automatic MSI remapping, can use
- * this to initialise their own domain appropriately. Users may reserve a
+ * this to initialise their own domain appropriately. Users should reserve a
  * contiguous IOVA region, starting at @base, large enough to accommodate the
  * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
- * used by the devices attached to @domain. The other way round is to provide
- * usable iova pages through the iommu_dma_bind_guest_msi API (nested stages
- * use case)
+ * used by the devices attached to @domain.
  */
 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 {
 	struct iommu_dma_cookie *cookie;
-	int nesting, ret;
 
 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
 		return -EINVAL;
@@ -119,17 +110,11 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
-	if (!ret && nesting)
-		cookie = cookie_alloc(IOMMU_DMA_NESTED_MSI_COOKIE);
-	else
-		cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
-
+	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
 	if (!cookie)
 		return -ENOMEM;
 
-	if (!nesting)
-		cookie->msi_iova = base;
+	cookie->msi_iova = base;
 	domain->iova_cookie = cookie;
 	return 0;
 }
@@ -153,116 +138,15 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 		put_iova_domain(&cookie->iovad);
 
-	spin_lock(&cookie->msi_lock);
 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
-		if (cookie->type == IOMMU_DMA_NESTED_MSI_COOKIE && msi->phys) {
-			size_t size = cookie_msi_granule(cookie);
-
-			WARN_ON(iommu_unmap(domain, msi->gpa, size) != size);
-		}
 		list_del(&msi->list);
 		kfree(msi);
 	}
-	spin_unlock(&cookie->msi_lock);
 	kfree(cookie);
 	domain->iova_cookie = NULL;
 }
 EXPORT_SYMBOL(iommu_put_dma_cookie);
 
-/**
- * iommu_dma_bind_guest_msi - Allows to pass the stage 1
- * binding of a virtual MSI doorbell used by @dev.
- *
- * @domain: domain handle
- * @giova: guest iova
- * @gpa: gpa of the virtual doorbell
- * @size: size of the granule used for the stage1 mapping
- *
- * In nested stage use case, the user can provide IOVA/IPA bindings
- * corresponding to a guest MSI stage 1 mapping. When the host needs
- * to map its own MSI doorbells, it can use @gpa as stage 2 input
- * and map it onto the physical MSI doorbell.
- */
-int iommu_dma_bind_guest_msi(struct iommu_domain *domain,
-			     dma_addr_t giova, phys_addr_t gpa, size_t size)
-{
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iommu_dma_msi_page *msi;
-	int ret = 0;
-
-	if (!cookie)
-		return -EINVAL;
-
-	if (cookie->type != IOMMU_DMA_NESTED_MSI_COOKIE)
-		return -EINVAL;
-
-	/*
-	 * we currently do not support S1 granule larger than S2 one
-	 * as this would oblige to have multiple S2 mappings for a
-	 * single S1 one
-	 */
-	if (size > cookie_msi_granule(cookie))
-		return -EINVAL;
-
-	giova = giova & ~(dma_addr_t)(size - 1);
-	gpa = gpa & ~(phys_addr_t)(size - 1);
-
-	spin_lock(&cookie->msi_lock);
-
-	list_for_each_entry(msi, &cookie->msi_page_list, list) {
-		if (msi->iova == giova)
-			goto unlock; /* this page is already registered */
-	}
-
-	msi = kzalloc(sizeof(*msi), GFP_ATOMIC);
-	if (!msi) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
-
-	msi->iova = giova;
-	msi->gpa = gpa;
-	msi->s1_granule = size;
-	list_add(&msi->list, &cookie->msi_page_list);
-unlock:
-	spin_unlock(&cookie->msi_lock);
-	return ret;
-}
-EXPORT_SYMBOL(iommu_dma_bind_guest_msi);
-
-void iommu_dma_unbind_guest_msi(struct iommu_domain *domain, dma_addr_t giova)
-{
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iommu_dma_msi_page *msi;
-
-	if (!cookie)
-		return;
-
-	if (cookie->type != IOMMU_DMA_NESTED_MSI_COOKIE)
-		return;
-
-	spin_lock(&cookie->msi_lock);
-
-	list_for_each_entry(msi, &cookie->msi_page_list, list) {
-		dma_addr_t aligned_giova =
-			giova & ~(dma_addr_t)(msi->s1_granule - 1);
-
-		if (msi->iova == aligned_giova) {
-			if (msi->phys) {
-				/* unmap the stage 2 */
-				size_t size = cookie_msi_granule(cookie);
-
-				WARN_ON(iommu_unmap(domain, msi->gpa, size) != size);
-			}
-			list_del(&msi->list);
-			kfree(msi);
-			break;
-		}
-	}
-	spin_unlock(&cookie->msi_lock);
-}
-EXPORT_SYMBOL(iommu_dma_unbind_guest_msi);
-
 /**
  * iommu_dma_get_resv_regions - Reserved region driver helper
  * @dev: Device from iommu_get_resv_regions()
@@ -1314,58 +1198,6 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 		 dev_name(dev));
 }
 
-/*
- * iommu_dma_get_nested_msi_page - Returns a nested stage MSI page
- * mapping translating into the physical doorbell address @msi_addr
- *
- * In nested mode, the userspace provides the guest
- * gIOVA - gDB stage 1 mappings. When we need to build a stage 2
- * mapping for a physical doorbell (@msi_addr), we look up
- * for an unused S1 mapping and map the gDB onto @msi_addr
- */
-static struct iommu_dma_msi_page *
-iommu_dma_get_nested_msi_page(struct iommu_domain *domain,
-			      phys_addr_t msi_addr)
-{
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iommu_dma_msi_page *iter, *msi_page = NULL;
-	size_t size = cookie_msi_granule(cookie);
-	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
-
-	spin_lock(&cookie->msi_lock);
-	list_for_each_entry(iter, &cookie->msi_page_list, list)
-		if (iter->phys == msi_addr) {
-			msi_page = iter;
-			goto unlock;
-		}
-
-	/*
-	 * No nested mapping exists for the physical doorbell,
-	 * look for an unused S1 mapping
-	 */
-	list_for_each_entry(iter, &cookie->msi_page_list, list) {
-		int ret;
-
-		if (iter->phys)
-			continue;
-
-		/* do the stage 2 mapping */
-		ret = iommu_map_atomic(domain, iter->gpa, msi_addr, size, prot);
-		if (ret) {
-			pr_warn_once("MSI S2 mapping 0x%llx -> 0x%llx failed (%d)\n",
-				     iter->gpa, msi_addr, ret);
-			goto unlock;
-		}
-		iter->phys = msi_addr;
-		msi_page = iter;
-		goto unlock;
-	}
-	pr_warn_once("No usable S1 MSI mapping found\n");
-unlock:
-	spin_unlock(&cookie->msi_lock);
-	return msi_page;
-}
-
 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 		phys_addr_t msi_addr, struct iommu_domain *domain)
 {
@@ -1376,10 +1208,6 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	size_t size = cookie_msi_granule(cookie);
 
 	msi_addr &= ~(phys_addr_t)(size - 1);
-
-	if (cookie->type == IOMMU_DMA_NESTED_MSI_COOKIE)
-		return iommu_dma_get_nested_msi_page(domain, msi_addr);
-
 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
 		if (msi_page->phys == msi_addr)
 			return msi_page;
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index f112ecdb4af6..2112f21f73d8 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -12,7 +12,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/msi.h>
-#include <uapi/linux/iommu.h>
 
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
@@ -37,9 +36,6 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 			       struct msi_msg *msg);
 
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-int iommu_dma_bind_guest_msi(struct iommu_domain *domain,
-			     dma_addr_t iova, phys_addr_t gpa, size_t size);
-void iommu_dma_unbind_guest_msi(struct iommu_domain *domain, dma_addr_t giova);
 
 #else /* CONFIG_IOMMU_DMA */
 
@@ -78,18 +74,6 @@ static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 {
 }
 
-static inline int
-iommu_dma_bind_guest_msi(struct iommu_domain *domain,
-			 dma_addr_t iova, phys_addr_t gpa, size_t size)
-{
-	return -ENODEV;
-}
-
-static inline void
-iommu_dma_unbind_guest_msi(struct iommu_domain *domain, dma_addr_t giova)
-{
-}
-
 static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 {
 }