From: Robin Murphy <robin.murphy@arm.com>
stable inclusion
from stable-v5.10.124
commit 73bc8a5e8e3a902d8cc9b2f42505f647ca48fac2
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5L6E7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
commit 4a37f3dd9a83186cb88d44808ab35b78375082c9 upstream.
The original x86 sev_alloc() only called set_memory_decrypted() on memory returned by alloc_pages_node(), so the page order calculation fell out of that logic. However, the common dma-direct code has several potential allocators, not all of which are guaranteed to round up the underlying allocation to a power-of-two size, so carrying over that calculation for the encryption/decryption size was a mistake. Fix it by rounding to a *number* of pages, rather than an order.
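For illustration only (not part of the patch): a minimal user-space sketch, assuming 4 KiB pages and re-implementing get_order()/PFN_UP() locally with their usual semantics, showing how the order-based count overstates the region for a non-power-of-two size such as 5 pages.

/*
 * Illustration only, not kernel code: get_order() and PFN_UP() are
 * re-implemented here (see <asm-generic/getorder.h> and <linux/pfn.h>),
 * assuming PAGE_SHIFT == 12.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* smallest order such that (1 << order) pages cover `size` bytes */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

/* number of pages needed to cover `size` bytes, rounded up */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE;	/* a 20 KiB allocation */

	/* old code: 8 pages, i.e. 3 pages beyond the allocation */
	printf("1 << get_order(size) = %lu pages\n", 1UL << get_order(size));
	/* new code: 5 pages, matching what was actually allocated */
	printf("PFN_UP(size)         = %lu pages\n", (unsigned long)PFN_UP(size));
	return 0;
}

When the buffer comes from the buddy allocator the two values agree, since the underlying allocation is rounded up to 1 << get_order(size) pages anyway; the discrepancy only bites for allocators that hand back exactly PFN_UP(size) pages.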
Until recently there was an even worse interaction with DMA_DIRECT_REMAP where we could have ended up decrypting part of the next adjacent vmalloc area, only averted by no architecture actually supporting both configs at once. Don't ask how I found that one out...
Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Rientjes <rientjes@google.com>
[ backport the functional change without all the prior refactoring ]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Wei Li <liwei391@huawei.com>
---
 kernel/dma/direct.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 06c111544f61..2922250f93b4 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -188,7 +188,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 			goto out_free_pages;
 		if (force_dma_unencrypted(dev)) {
 			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
+						   PFN_UP(size));
 			if (err)
 				goto out_free_pages;
 		}
@@ -210,7 +210,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
+					   PFN_UP(size));
 		if (err)
 			goto out_free_pages;
 	}
@@ -231,7 +231,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 out_encrypt_pages:
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
+					   PFN_UP(size));
 		/* If memory cannot be re-encrypted, it must be leaked */
 		if (err)
 			return NULL;
@@ -244,8 +244,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	unsigned int page_order = get_order(size);
-
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -266,7 +264,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+		set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -302,8 +300,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
+		if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
 			goto out_free_pages;
 	}
 	memset(ret, 0, size);
@@ -318,7 +315,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir)
 {
-	unsigned int page_order = get_order(size);
 	void *vaddr = page_address(page);
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -327,7 +323,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+		set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 
 	dma_free_contiguous(dev, page, size);
 }