author      Christoph Hellwig <hch@lst.de>    2020-02-03 18:11:10 +0100
committer   Christoph Hellwig <hch@lst.de>    2020-02-05 18:50:55 +0100
commit      91ef26f914171cf753330f13724fd9142b5b1640 (patch)
tree        67c2f70a79ebbe6ba1fcc802d13f4d24b06daf43 /kernel/dma
parent      8c8c5a4994a306c217fd061cbfc5903399fd4c1c (diff)
dma-direct: relax addressability checks in dma_direct_supported
dma_direct_supported tries to find the minimum addressable bitmask based on the end pfn and an optional, architecture-specific hint about the size of the magic ZONE_DMA that can be used for bounce buffering. But between DMA offsets that can change per device (or sometimes even per region), the fact that ZONE_DMA isn't even guaranteed to cover the lowest addresses, and the lack of proper interfaces to the MM code, this check fails for at least one arm subarchitecture.

As all the legacy DMA implementations have supported 32-bit DMA masks, and 32-bit masks are guaranteed to always work by the API contract (using bounce buffers if needed), we can short-circuit the complicated check and always return true without breaking existing assumptions. Hopefully we can properly clean up the interaction with the arch-defined zones and the bootmem allocator eventually.

Fixes: ad3c7b18c5b3 ("arm: use swiotlb for bounce buffering on LPAE configs")
Reported-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
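To make the reworked logic easy to reason about in isolation, here is a minimal standalone C model of the check as it looks after this patch. It is a sketch only: max_pfn, zone_dma_bits, and the per-device offset handling of __phys_to_dma() are stubbed with assumed values (4 GiB of RAM, a 24-bit ZONE_DMA, no DMA offset), and the kernel macros are reimplemented just enough to compile in user space.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* stand-ins for the kernel's globals/helpers (assumed values) */
static uint64_t max_pfn = 0x100000;     /* 2^20 pages == 4 GiB of RAM */
static int zone_dma_bits = 24;          /* ISA-style ZONE_DMA */

/* identity mapping: no per-device DMA offset, no SME encryption bit */
static uint64_t phys_to_dma(uint64_t phys) { return phys; }

static int dma_direct_supported(uint64_t mask)
{
        uint64_t min_mask = (max_pfn - 1) << PAGE_SHIFT;

        /* 32-bit masks are always supported per the DMA API contract */
        if (mask >= DMA_BIT_MASK(32))
                return 1;

        /* smaller masks must still reach ZONE_DMA (or all of RAM) */
        if (min_mask > DMA_BIT_MASK(zone_dma_bits))
                min_mask = DMA_BIT_MASK(zone_dma_bits);
        return mask >= phys_to_dma(min_mask);
}

int main(void)
{
        printf("64-bit mask: %d\n", dma_direct_supported(DMA_BIT_MASK(64)));
        printf("32-bit mask: %d\n", dma_direct_supported(DMA_BIT_MASK(32)));
        printf("24-bit mask: %d\n", dma_direct_supported(DMA_BIT_MASK(24)));
        printf("20-bit mask: %d\n", dma_direct_supported(DMA_BIT_MASK(20)));
        return 0;
}

Under these assumptions, 64-bit and 32-bit masks succeed unconditionally, a 24-bit mask succeeds because it covers the modeled ZONE_DMA, and a 20-bit mask is rejected.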
Diffstat (limited to 'kernel/dma')
-rw-r--r--   kernel/dma/direct.c   24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 6af7ae83c4ad..32ec69cdba54 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -472,28 +472,26 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 }
 #endif /* CONFIG_MMU */
 
-/*
- * Because 32-bit DMA masks are so common we expect every architecture to be
- * able to satisfy them - either by not supporting more physical memory, or by
- * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
- * use an IOMMU instead of the direct mapping.
- */
 int dma_direct_supported(struct device *dev, u64 mask)
 {
-        u64 min_mask;
-
-        if (IS_ENABLED(CONFIG_ZONE_DMA))
-                min_mask = DMA_BIT_MASK(zone_dma_bits);
-        else
-                min_mask = DMA_BIT_MASK(32);
+        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
 
-        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
+        /*
+         * Because 32-bit DMA masks are so common we expect every architecture
+         * to be able to satisfy them - either by not supporting more physical
+         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
+         * architecture needs to use an IOMMU instead of the direct mapping.
+         */
+        if (mask >= DMA_BIT_MASK(32))
+                return 1;
 
         /*
          * This check needs to be against the actual bit mask value, so
          * use __phys_to_dma() here so that the SME encryption mask isn't
          * part of the check.
          */
+        if (IS_ENABLED(CONFIG_ZONE_DMA))
+                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
         return mask >= __phys_to_dma(dev, min_mask);
 }
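For context, the path that exercises this check is a driver negotiating its DMA mask at probe time via dma_set_mask() or dma_set_mask_and_coherent(). A sketch of the common fallback pattern follows; pdev and the error handling are illustrative only. After this patch, the 32-bit fallback can no longer be refused by dma-direct:

/* hypothetical probe-time snippet; pdev and error paths are illustrative */
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
        /* 64-bit addressing not available, fall back to 32 bits */
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                return -ENODEV; /* unreachable on dma-direct after this patch */
}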