author     Christoph Hellwig <hch@lst.de>  2021-11-09 15:41:01 +0100
committer  Christoph Hellwig <hch@lst.de>  2021-12-07 12:47:38 +0100
commit     a90cf30437489343b8386ae87b4827b6d6c3ed50 (patch)
tree       65dd1ad2ed4fac84b87a36df276ebf385ce48232 /kernel/dma/direct.c
parent     5570449b6876f215d49ac4db9ccce6ff7aa1e20a (diff)
dma-direct: always leak memory that can't be re-encrypted
We must never let unencrypted memory go back into the general page pool.
So if we fail to set it back to encrypted when freeing DMA memory, leak
the memory instead and warn the user.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
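As a rough, illustrative sketch of that policy (not the kernel code itself: reencrypt_buffer() and buf_free() below are hypothetical userspace stand-ins for set_memory_encrypted() and the real free path), the idea is that a failed re-encryption turns the free into a deliberate, noisy leak rather than handing shared memory back to the allocator:

/* Minimal userspace sketch, for illustration only. */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for set_memory_encrypted(); nonzero return means failure. */
static int reencrypt_buffer(void *buf, size_t size)
{
	(void)buf;
	(void)size;
	return -1;	/* simulate a failure so the leak path runs */
}

static void buf_free(void *buf, size_t size)
{
	if (reencrypt_buffer(buf, size)) {
		/*
		 * The buffer may still be shared/unencrypted, so it must not
		 * go back to the allocator: leak it and warn, the way the
		 * patch uses pr_warn_ratelimited() in the kernel.
		 */
		fprintf(stderr, "leaking %zu bytes that can't be re-encrypted\n",
			size);
		return;
	}
	free(buf);	/* safe only once the memory is private again */
}

int main(void)
{
	void *buf = malloc(4096);

	if (buf)
		buf_free(buf, 4096);
	return 0;
}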
Diffstat (limited to 'kernel/dma/direct.c')
-rw-r--r--  kernel/dma/direct.c  |  14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 996ba4edb2fa..d7a489be4847 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -84,9 +84,14 @@ static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
 
 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 {
+	int ret;
+
 	if (!force_dma_unencrypted(dev))
 		return 0;
-	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+	if (ret)
+		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
+	return ret;
 }
 
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
@@ -261,7 +266,6 @@ done:
 	return ret;
 
 out_encrypt_pages:
-	/* If memory cannot be re-encrypted, it must be leaked */
 	if (dma_set_encrypted(dev, page_address(page), size))
 		return NULL;
 out_free_pages:
@@ -307,7 +311,8 @@ void dma_direct_free(struct device *dev, size_t size,
 	} else {
 		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
 			arch_dma_clear_uncached(cpu_addr, size);
-		dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+		if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
+			return;
 	}
 
 	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
@@ -361,7 +366,8 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	dma_set_encrypted(dev, vaddr, 1 << page_order);
+	if (dma_set_encrypted(dev, vaddr, 1 << page_order))
+		return;
 	__dma_direct_free_pages(dev, page, size);
 }