author	Robin Murphy <robin.murphy@arm.com>	2022-05-20 18:10:13 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-06-22 14:13:20 +0200
commit	73bc8a5e8e3a902d8cc9b2f42505f647ca48fac2 (patch)
tree	6544a500d154f8bc43b3a01bd690535f40c5e42a
parent	aa9a001efa9ccbb66a22a528bdfce9e825709343 (diff)
dma-direct: don't over-decrypt memory
commit 4a37f3dd9a83186cb88d44808ab35b78375082c9 upstream.

The original x86 sev_alloc() only called set_memory_decrypted() on
memory returned by alloc_pages_node(), so the page order calculation
fell out of that logic. However, the common dma-direct code has several
potential allocators, not all of which are guaranteed to round up the
underlying allocation to a power-of-two size, so carrying over that
calculation for the encryption/decryption size was a mistake. Fix it by
rounding to a *number* of pages, rather than an order.

Until recently there was an even worse interaction with DMA_DIRECT_REMAP
where we could have ended up decrypting part of the next adjacent
vmalloc area, only averted by no architecture actually supporting both
configs at once. Don't ask how I found that one out...

Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Rientjes <rientjes@google.com>
[ backport the functional change without all the prior refactoring ]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
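To make the over-decryption concrete, here is a minimal userspace sketch (not kernel code; it assumes a 4 KiB page size and uses simplified stand-ins for the kernel's get_order() and PFN_UP() helpers). For a 5-page buffer, rounding the size to an order covers 8 pages, three more than were actually allocated by a non-power-of-two allocator, while rounding to a page count covers exactly 5.

/*
 * Illustration only: simplified userspace stand-ins for get_order()
 * and PFN_UP(), assuming PAGE_SIZE == 4 KiB.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order such that (1 << order) pages cover @size. */
static unsigned int order_for(size_t size)
{
	size_t pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

/* Number of whole pages needed to cover @size (PFN_UP analogue). */
static size_t pages_for(size_t size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	size_t size = 5 * PAGE_SIZE;	/* e.g. a 20 KiB coherent buffer */

	printf("pages covered via order:  %lu\n", 1UL << order_for(size)); /* 8 */
	printf("pages covered via PFN_UP: %zu\n", pages_for(size));        /* 5 */
	return 0;
}

With the old calculation, set_memory_decrypted() would be called on the three pages past the end of such a buffer, which is exactly the over-decryption this patch removes.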
-rw-r--r--	kernel/dma/direct.c	16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 06c111544f61..2922250f93b4 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -188,7 +188,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
goto out_free_pages;
if (force_dma_unencrypted(dev)) {
err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
+ PFN_UP(size));
if (err)
goto out_free_pages;
}
@@ -210,7 +210,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
ret = page_address(page);
if (force_dma_unencrypted(dev)) {
err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
+ PFN_UP(size));
if (err)
goto out_free_pages;
}
@@ -231,7 +231,7 @@ done:
out_encrypt_pages:
if (force_dma_unencrypted(dev)) {
err = set_memory_encrypted((unsigned long)page_address(page),
- 1 << get_order(size));
+ PFN_UP(size));
/* If memory cannot be re-encrypted, it must be leaked */
if (err)
return NULL;
@@ -244,8 +244,6 @@ out_free_pages:
void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
- unsigned int page_order = get_order(size);
-
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
@@ -266,7 +264,7 @@ void dma_direct_free(struct device *dev, size_t size,
return;
if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+ set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
vunmap(cpu_addr);
@@ -302,8 +300,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
ret = page_address(page);
if (force_dma_unencrypted(dev)) {
- if (set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size)))
+ if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
goto out_free_pages;
}
memset(ret, 0, size);
@@ -318,7 +315,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
struct page *page, dma_addr_t dma_addr,
enum dma_data_direction dir)
{
- unsigned int page_order = get_order(size);
void *vaddr = page_address(page);
/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -327,7 +323,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
return;
if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+ set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
dma_free_contiguous(dev, page, size);
}