From: Jingxian He <hejingxian@huawei.com>
mainline inclusion
from mainline-v5.14-rc1
commit f4111e39a52aa5d5136d890bbd1aa87c1c8fe3bc
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
[ Upstream commit f4111e39a52aa5d5136d890bbd1aa87c1c8fe3bc ]
Add the functions swiotlb_{alloc,free} and is_swiotlb_for_alloc to support memory allocation from the restricted DMA pool.
The restricted DMA pool is preferred if available.
Note that since coherent allocation needs remapping, one must set up another device coherent pool via the shared-dma-pool binding and use dma_alloc_from_dev_coherent instead for atomic coherent allocation.
Signed-off-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Conflicts:
	include/linux/swiotlb.h
	kernel/dma/swiotlb.c
[hejingxian: drop the changes to struct io_tlb_mem, which does not exist in OLK-5.10]
[hejingxian: switch the per-device dma_io_tlb_mem start to the global io_tlb_start]
[hejingxian: add definitions for swiotlb_release_slots and swiotlb_find_slots]
[hejingxian: move is_swiotlb_for_alloc into direct.c and return false]
Signed-off-by: Jingxian He <hejingxian@huawei.com>
---
 include/linux/swiotlb.h | 17 +++++++++++++
 kernel/dma/Kconfig      | 10 ++++++++
 kernel/dma/direct.c     | 56 ++++++++++++++++++++++++++++++++---------
 kernel/dma/swiotlb.c    | 41 ++++++++++++++++++++++++++++++
 4 files changed, 112 insertions(+), 12 deletions(-)
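Note, not part of the applied diff: drivers reach the new allocation path through the existing dma_alloc_pages() API. A minimal, hypothetical caller is sketched below; my_dev and MY_BUF_SIZE are placeholders, and in this backport the branch stays inert because is_swiotlb_for_alloc() is stubbed to return false (see the Conflicts note above).

  #include <linux/dma-mapping.h>
  #include <linux/sizes.h>

  #define MY_BUF_SIZE SZ_64K

  /* Hypothetical caller: dma_alloc_pages() -> dma_direct_alloc_pages()
   * -> __dma_direct_alloc_pages(), which after this patch tries
   * swiotlb_alloc() before dma_alloc_contiguous() whenever
   * is_swiotlb_for_alloc(dev) reports a restricted DMA pool. */
  static struct page *my_alloc_restricted_buf(struct device *my_dev,
					      dma_addr_t *dma_handle)
  {
	return dma_alloc_pages(my_dev, MY_BUF_SIZE, dma_handle,
			       DMA_BIDIRECTIONAL, GFP_KERNEL);
  }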
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5d2dbe7e0..cb225d0a1 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -106,4 +106,21 @@ static inline bool is_swiotlb_active(void)
 extern void swiotlb_print_info(void);
 extern void swiotlb_set_max_segment(unsigned int);
 
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+
+struct page *swiotlb_alloc(struct device *dev, size_t size);
+bool swiotlb_free(struct device *dev, struct page *page, size_t size);
+
+#else
+static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+	return NULL;
+}
+static inline bool swiotlb_free(struct device *dev, struct page *page,
+				size_t size)
+{
+	return false;
+}
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
+
 #endif /* __LINUX_SWIOTLB_H */
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 07f30651b..8151fd2b6 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -84,6 +84,16 @@ config SWIOTLB
 	bool
 	select NEED_DMA_MAP_STATE
 
+config DMA_RESTRICTED_POOL
+	bool "DMA Restricted Pool"
+	depends on OF && OF_RESERVED_MEM && SWIOTLB
+	help
+	  This enables support for restricted DMA pools which provide a level of
+	  DMA memory protection on systems with limited hardware protection
+	  capabilities, such as those lacking an IOMMU.
+
+	  If unsure, say "n".
+
 #
 # Should be selected if we can mmap non-coherent mappings to userspace.
 # The only thing that is really required is a way to set an uncached bit
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2922250f9..f13493c54 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -22,6 +22,13 @@
  */
 unsigned int zone_dma_bits __ro_after_init = 24;
 
+#ifndef is_swiotlb_for_alloc
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+	return false;
+}
+#endif
+
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
 {
@@ -75,6 +82,15 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static void __dma_direct_free_pages(struct device *dev, struct page *page,
+		size_t size)
+{
+	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+	    swiotlb_free(dev, page, size))
+		return;
+	dma_free_contiguous(dev, page, size);
+}
+
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp)
 {
@@ -86,6 +102,16 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
+	if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+	    is_swiotlb_for_alloc(dev)) {
+		page = swiotlb_alloc(dev, size);
+		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+			__dma_direct_free_pages(dev, page, size);
+			return NULL;
+		}
+		return page;
+	}
+
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
@@ -142,7 +168,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
 		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 		if (!page)
 			return NULL;
@@ -155,18 +181,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev))
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_swiotlb_for_alloc(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
+	 * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
+	 * set up another device coherent pool by shared-dma-pool and use
+	 * dma_alloc_from_dev_coherent instead.
 	 */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    !gfpflags_allow_blocking(gfp) &&
 	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	      !dev_is_dma_coherent(dev))) &&
+	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -237,7 +268,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		return NULL;
 	}
 out_free_pages:
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 	return NULL;
 }
 
@@ -245,15 +276,15 @@ void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev)) {
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_swiotlb_for_alloc(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
@@ -271,7 +302,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
 		arch_dma_clear_uncached(cpu_addr, size);
 
-	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
+	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -281,7 +312,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	void *ret;
 
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	    !is_swiotlb_for_alloc(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -307,7 +339,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
 out_free_pages:
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 	return NULL;
 }
 
@@ -325,7 +357,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 
-	dma_free_contiguous(dev, page, size);
+	__dma_direct_free_pages(dev, page, size);
 }
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d897d1613..79dfd1078 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -772,3 +772,44 @@ static int __init swiotlb_create_debugfs(void)
 late_initcall(swiotlb_create_debugfs);
 
 #endif
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+
+static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
+		size_t size)
+{
+	return find_slots(dev, orig_addr, size);
+}
+
+struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+	phys_addr_t tlb_addr;
+	int index;
+
+	index = swiotlb_find_slots(dev, 0, size);
+	if (index == -1)
+		return NULL;
+
+	tlb_addr = slot_addr(io_tlb_start, index);
+
+	return pfn_to_page(PFN_DOWN(tlb_addr));
+}
+
+static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr,
+		size_t alloc_size)
+{
+}
+
+bool swiotlb_free(struct device *dev, struct page *page, size_t size)
+{
+	phys_addr_t tlb_addr = page_to_phys(page);
+
+	if (!is_swiotlb_buffer(tlb_addr))
+		return false;
+
+	swiotlb_release_slots(dev, tlb_addr, size);
+
+	return true;
+}
+
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
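
For reference, swiotlb_free() above decides ownership purely by physical address: in this 5.10-based tree, is_swiotlb_buffer() is, to the best of my reading of include/linux/swiotlb.h, a plain range check against the global bounce buffer:

  static inline bool is_swiotlb_buffer(phys_addr_t paddr)
  {
	/* The page belongs to the bounce buffer iff its physical
	 * address falls inside [io_tlb_start, io_tlb_end). */
	return paddr >= io_tlb_start && paddr < io_tlb_end;
  }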
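
On the atomic coherent case called out in the commit message: dma_alloc_attrs() consults the per-device coherent pool (dma_alloc_from_dev_coherent()) before it ever reaches dma_direct_alloc(), which is why a shared-dma-pool node covers the allocations this path refuses to serve. A hedged driver-side sketch, with my_dev as a placeholder:

  #include <linux/dma-mapping.h>

  /* Atomic coherent allocation for a device behind a restricted pool:
   * dma-direct cannot remap in atomic context, so the per-device
   * coherent pool set up via shared-dma-pool has to satisfy this. */
  static void *my_alloc_atomic_coherent(struct device *my_dev, size_t size,
					dma_addr_t *dma_handle)
  {
	return dma_alloc_coherent(my_dev, size, dma_handle, GFP_ATOMIC);
  }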