From: Xiang Chen <chenxiang66@hisilicon.com>
Create a debugfs file "iova_rcache" to show how many IOVA ranges are cached in each per-CPU rcache and in the shared cache ("share cache", the depot) of the domain.
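The dump consists of one line per online CPU plus a "share cache" line: each per-CPU entry is "[n]=loaded|prev" (entries in the loaded and previous magazines of cache bin n), and each shared entry is "[n]=depot_size". Illustrative output for a hypothetical 2-CPU system (values made up):

cpu0 [0]=28|14 [1]=3|0 [2]=0|0 [3]=0|0 [4]=0|0 [5]=0|0
cpu1 [0]=17|0 [1]=6|0 [2]=0|0 [3]=0|0 [4]=0|0 [5]=0|0
share cache: [0]=2 [1]=0 [2]=0 [3]=0 [4]=0 [5]=0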
Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
---
 drivers/iommu/dma-iommu.c     |  2 ++
 drivers/iommu/iommu-debugfs.c | 51 +++++++++++++++++++++++++++++++++++++++++++
 drivers/iommu/iova.c          | 21 ------------------
 include/linux/iommu.h         |  3 +++
 include/linux/iova.h          | 22 +++++++++++++++++--
 5 files changed, 76 insertions(+), 23 deletions(-)
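One hypothetical way to trigger the dump, assuming debugfs is mounted at /sys/kernel/debug and that the per-domain directory created by debugfs_create_domain_dir() sits under the top-level iommu debugfs directory (the exact directory name is not fixed by this patch):

  cat /sys/kernel/debug/iommu/<domain dir>/iova_rcache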
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a1431a9..4cb63b2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -387,6 +387,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	if (!dev)
 		return 0;
 
+	debugfs_create_iova_file(domain);
+
 	return iova_reserve_iommu_regions(dev, domain);
 }
diff --git a/drivers/iommu/iommu-debugfs.c b/drivers/iommu/iommu-debugfs.c
index 224b90f..6489430 100644
--- a/drivers/iommu/iommu-debugfs.c
+++ b/drivers/iommu/iommu-debugfs.c
@@ -10,6 +10,8 @@
 #include <linux/pci.h>
 #include <linux/iommu.h>
 #include <linux/debugfs.h>
+#include <linux/iova.h>
+#include <linux/dma-iommu.h>
 
 struct dentry *iommu_debugfs_dir;
 EXPORT_SYMBOL_GPL(iommu_debugfs_dir);
@@ -77,3 +79,52 @@ void debugfs_destroy_domain_dir(struct iommu_group *group)
 	debugfs_remove_recursive(domain->domain_dir);
 }
+
+static int debugfs_iova_rcache_show(struct seq_file *s, void *p)
+{
+	struct iommu_domain *iommu_domain = s->private;
+	struct iova_domain *iovad = iommu_domain_to_iova(iommu_domain);
+	char string[1024];
+	unsigned long flags;
+	unsigned int cpu;
+	int i;
+
+	for_each_online_cpu(cpu) {
+		sprintf(string, "cpu%d ", cpu);
+		for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+			struct iova_rcache *rcache = &iovad->rcaches[i];
+			struct iova_cpu_rcache *cpu_rcache;
+			unsigned long loaded_size, prev_size;
+
+			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+			spin_lock_irqsave(&cpu_rcache->lock, flags);
+			loaded_size = cpu_rcache->loaded->size;
+			prev_size = cpu_rcache->prev->size;
+			spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+			sprintf(string + strlen(string), "[%d]=%ld|%ld ",
+				i, loaded_size, prev_size);
+		}
+		pr_err("%s\n", string);
+	}
+
+	sprintf(string, "share cache: ");
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; i++) {
+		struct iova_rcache *rcache = &iovad->rcaches[i];
+		int size;
+
+		spin_lock(&rcache->lock);
+		size = rcache->depot_size;
+		spin_unlock(&rcache->lock);
+		sprintf(string + strlen(string), "[%d]=%d ", i, size);
+	}
+	pr_err("%s\n", string);
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_iova_rcache);
+
+void debugfs_create_iova_file(struct iommu_domain *domain)
+{
+	debugfs_create_file("iova_rcache", 0400, domain->domain_dir,
+			    domain, &debugfs_iova_rcache_fops);
+}
+
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 0374e23b..ecdebcd 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -775,27 +775,6 @@ reserve_iova(struct iova_domain *iovad,
 }
 EXPORT_SYMBOL_GPL(reserve_iova);
-/*
- * Magazine caches for IOVA ranges. For an introduction to magazines,
- * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
- * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
- * For simplicity, we use a static magazine size and don't implement the
- * dynamic size tuning described in the paper.
- */
-
-#define IOVA_MAG_SIZE 128
-
-struct iova_magazine {
-	unsigned long size;
-	unsigned long pfns[IOVA_MAG_SIZE];
-};
-
-struct iova_cpu_rcache {
-	spinlock_t lock;
-	struct iova_magazine *loaded;
-	struct iova_magazine *prev;
-};
-
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
 {
 	return kzalloc(sizeof(struct iova_magazine), flags);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ef75bdb6..905a0ee 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1042,6 +1042,7 @@ void debugfs_create_iovad_dir(void);
 void debugfs_destroy_iovad_dir(void);
 void debugfs_create_domain_dir(struct iommu_group *group);
 void debugfs_destroy_domain_dir(struct iommu_group *group);
+void debugfs_create_iova_file(struct iommu_domain *domain);
 
 #else
 static inline void iommu_debugfs_setup(void) {}
@@ -1049,6 +1050,8 @@ static inline void debugfs_create_iovad_dir(void) {}
 static inline void debugfs_destroy_iovad_dir(void) {}
 static inline void debugfs_create_domain_dir(struct iommu_group *group) {}
 static inline void debugfs_destroy_domain_dir(struct iommu_group *group) {}
+static inline void debugfs_create_iova_file(struct iommu_domain *domain) {}
+
 #endif
 
 #endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 9386e77..8d935e3 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -23,8 +23,26 @@ struct iova {
 	unsigned long pfn_lo;	/* Lowest allocated pfn */
 };
 
-struct iova_magazine;
-struct iova_cpu_rcache;
+/*
+ * Magazine caches for IOVA ranges. For an introduction to magazines,
+ * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
+ * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
+ * For simplicity, we use a static magazine size and don't implement the
+ * dynamic size tuning described in the paper.
+ */
+
+#define IOVA_MAG_SIZE 128
+
+struct iova_magazine {
+	unsigned long size;
+	unsigned long pfns[IOVA_MAG_SIZE];
+};
+
+struct iova_cpu_rcache {
+	spinlock_t lock;
+	struct iova_magazine *loaded;
+	struct iova_magazine *prev;
+};
 
 #define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
 #define MAX_GLOBAL_MAGS 32	/* magazines per bin */