From: jiangdongxu1 <jiangdongxu1@huawei.com>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I86ITO
----------------------------------------------------------------------
When attaching the dma_dev to its IOMMU domain, walk the device's reserved regions and test whether the IOMMU translates MSI transactions. If it does (the IOMMU reports a software MSI window and no hardware-translated MSI region), initialize an IOVA allocator through the iommu_get_msi_cookie() API. This allows MSI IOVAs to be transparently allocated in the MSI controller's compose(). The reserved regions are also recorded in a per-device resv_iotlb so that IOTLB updates overlapping any of them are rejected with -EINVAL.
Signed-off-by: jiangdongxu1 <jiangdongxu1@huawei.com>
---
 drivers/vhost/vdpa.c | 60 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 57 insertions(+), 3 deletions(-)
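A minimal userspace sketch of the user-visible behavior, for illustration
only. The device node /dev/vhost-vdpa-0, the reserved IOVA 0x8000000 and
the 4 KiB size are placeholder assumptions; on real hardware the reserved
ranges come from the IOMMU driver (e.g. an SMMU software MSI window). With
this patch, a VHOST_IOTLB_UPDATE overlapping such a range fails with
-EINVAL before any page pinning is attempted:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

#define RESV_IOVA	0x8000000ULL	/* hypothetical reserved IOVA */

int main(void)
{
	struct vhost_msg_v2 msg;
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);	/* assumed device */

	if (fd < 0)
		return 1;

	ioctl(fd, VHOST_SET_OWNER);	/* IOTLB messages need an owner */

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = RESV_IOVA;
	msg.iotlb.size = 0x1000;
	msg.iotlb.uaddr = 0;		/* a real client puts a user VA here */
	msg.iotlb.perm = VHOST_ACCESS_RW;

	/* vhost-vdpa consumes IOTLB messages through write() */
	if (write(fd, &msg, sizeof(msg)) < 0 && errno == EINVAL)
		printf("update overlapping a reserved region: EINVAL\n");

	close(fd);
	return 0;
}

Note the VHOST_SET_OWNER ioctl: vhost rejects IOTLB messages from a
process that has not claimed ownership of the device.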
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 23a27df16cff..9bd524e2ed71 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -22,6 +22,7 @@
 #include <linux/vdpa.h>
 #include <linux/nospec.h>
 #include <linux/vhost.h>
+#include <linux/dma-iommu.h>
 
 #include "vhost.h"
 
@@ -49,6 +50,7 @@ struct vhost_vdpa {
 	struct completion completion;
 	struct vdpa_device *vdpa;
 	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
+	struct vhost_iotlb resv_iotlb;
 	struct device dev;
 	struct cdev cdev;
 	atomic_t opened;
@@ -1251,6 +1253,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	    msg->iova + msg->size - 1 > v->range.last)
 		return -EINVAL;
 
+	if (vhost_iotlb_itree_first(&v->resv_iotlb, msg->iova,
+				    msg->iova + msg->size - 1))
+		return -EINVAL;
+
 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
 				    msg->iova + msg->size - 1))
 		return -EEXIST;
@@ -1339,6 +1345,46 @@ static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
 	return vhost_chr_write_iter(dev, from);
 }
 
+static int vhost_vdpa_resv_iommu_region(struct iommu_domain *domain,
+					struct device *dma_dev,
+					struct vhost_iotlb *resv_iotlb)
+{
+	struct list_head dev_resv_regions;
+	phys_addr_t resv_msi_base = 0;
+	struct iommu_resv_region *region;
+	int ret = 0;
+	bool with_sw_msi = false;
+	bool with_hw_msi = false;
+
+	INIT_LIST_HEAD(&dev_resv_regions);
+	iommu_get_resv_regions(dma_dev, &dev_resv_regions);
+
+	list_for_each_entry(region, &dev_resv_regions, list) {
+		ret = vhost_iotlb_add_range_ctx(resv_iotlb, region->start,
+						region->start + region->length - 1,
+						0, 0, NULL);
+		if (ret) {
+			vhost_iotlb_reset(resv_iotlb);
+			break;
+		}
+
+		if (region->type == IOMMU_RESV_MSI)
+			with_hw_msi = true;
+
+		if (region->type == IOMMU_RESV_SW_MSI) {
+			resv_msi_base = region->start;
+			with_sw_msi = true;
+		}
+	}
+
+	if (!ret && !with_hw_msi && with_sw_msi)
+		ret = iommu_get_msi_cookie(domain, resv_msi_base);
+
+	iommu_put_resv_regions(dma_dev, &dev_resv_regions);
+
+	return ret;
+}
+
 static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
 {
 	struct vdpa_device *vdpa = v->vdpa;
@@ -1364,11 +1410,16 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
 	ret = iommu_attach_device(v->domain, dma_dev);
 	if (ret)
-		goto err_attach;
+		goto err_alloc_domain;
 
-	return 0;
+	ret = vhost_vdpa_resv_iommu_region(v->domain, dma_dev, &v->resv_iotlb);
+	if (ret)
+		goto err_attach_device;
 
-err_attach:
+	return 0;
+err_attach_device:
+	iommu_detach_device(v->domain, dma_dev);
+err_alloc_domain:
 	iommu_domain_free(v->domain);
 	v->domain = NULL;
 	return ret;
 }
@@ -1495,6 +1546,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 	vhost_vdpa_unbind_mm(v);
 	vhost_vdpa_config_put(v);
 	vhost_vdpa_cleanup(v);
+	vhost_iotlb_reset(&v->resv_iotlb);
 	mutex_unlock(&d->mutex);
 
 	atomic_dec(&v->opened);
@@ -1627,6 +1679,8 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
 		goto err;
 	}
 
+	vhost_iotlb_init(&v->resv_iotlb, 0, 0);
+
 	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
 	if (r)
 		goto err;