author     Logan Gunthorpe <logang@deltatee.com>      2022-07-08 10:50:56 -0600
committer  Christoph Hellwig <hch@lst.de>             2022-07-26 07:27:47 -0400
commit     f02ad36d4f76645e7e1c21f572260e9a2e61c26b (patch)
tree       0e46523b735822b5db0e63e4afb0af0841a1e275 /kernel/dma/direct.h
parent     7c2645a2a30a45d3dc4c98b315a51be44ec69a67 (diff)
dma-direct: support PCI P2PDMA pages in dma-direct map_sg
Add PCI P2PDMA support for dma_direct_map_sg() so that it can map PCI P2PDMA pages directly without a hack in the callers. This allows for heterogeneous SGLs that contain both P2PDMA and regular pages.

A P2PDMA page may have three possible outcomes when being mapped:

  1) If the data path between the two devices doesn't go through the
     root port, then it should be mapped with a PCI bus address
  2) If the data path goes through the host bridge, it should be mapped
     normally, as though it were a CPU physical address
  3) It is not possible for the two devices to communicate and thus
     the mapping operation should fail (and it will return -EREMOTEIO).

SGL segments that contain PCI bus addresses are marked with sg_dma_mark_pci_p2pdma() and are ignored when unmapped.

P2PDMA mappings are also failed if swiotlb needs to be used on the mapping.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
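For callers, the upshot is that a heterogeneous SGL can be handed to the generic DMA API unchanged and the P2PDMA decision is made per segment; outcome 3 surfaces as -EREMOTEIO from the mapping call. The sketch below is not part of this patch: it is a hypothetical consumer (the function name and the descriptor-programming helper are invented) showing how that error might be handled when mapping an sg_table that mixes regular and P2PDMA pages.

/*
 * Illustration only, not from this series: a hypothetical driver path
 * that maps an sg_table which may mix regular and PCI P2PDMA pages.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical hardware-specific helper, declared only for the example. */
void example_program_descriptor(dma_addr_t addr, unsigned int len);

static int example_map_mixed_sgl(struct device *dma_dev, struct sg_table *sgt,
                                 enum dma_data_direction dir)
{
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        ret = dma_map_sgtable(dma_dev, sgt, dir, 0);
        if (ret == -EREMOTEIO)
                /*
                 * Outcome 3: the P2PDMA pages in this SGL cannot reach
                 * dma_dev (or would need a swiotlb bounce).  Fall back,
                 * e.g. by staging the transfer through host memory.
                 */
                return ret;
        if (ret)
                return ret;

        /*
         * On success every segment carries a usable DMA address: either a
         * normal mapping (outcome 2) or a PCI bus address (outcome 1).
         */
        for_each_sgtable_dma_sg(sgt, sg, i)
                example_program_descriptor(sg_dma_address(sg), sg_dma_len(sg));

        return 0;
}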
Diffstat (limited to 'kernel/dma/direct.h')
-rw-r--r--  kernel/dma/direct.h  8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index a78c0ba70645..e38ffc5e6bdd 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -8,6 +8,7 @@
 #define _KERNEL_DMA_DIRECT_H
 
 #include <linux/dma-direct.h>
+#include <linux/memremap.h>
 
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -87,10 +88,15 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
         phys_addr_t phys = page_to_phys(page) + offset;
         dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
-        if (is_swiotlb_force_bounce(dev))
+        if (is_swiotlb_force_bounce(dev)) {
+                if (is_pci_p2pdma_page(page))
+                        return DMA_MAPPING_ERROR;
                 return swiotlb_map(dev, phys, size, dir, attrs);
+        }
 
         if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+                if (is_pci_p2pdma_page(page))
+                        return DMA_MAPPING_ERROR;
                 if (is_swiotlb_active(dev))
                         return swiotlb_map(dev, phys, size, dir, attrs);
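The extract above stops inside the second hunk, but both hunks add the same guard: a P2PDMA page backs another device's BAR, and a swiotlb bounce buffer lives in host memory, so bouncing cannot correctly implement a peer-to-peer transfer; the only safe response when a bounce would be needed is to fail the mapping. As a reading aid (not code from this commit; the helper name is invented), the added guard amounts to:

#include <linux/dma-mapping.h>
#include <linux/memremap.h>
#include <linux/swiotlb.h>

/* Illustration only: refuse to bounce P2PDMA pages, otherwise use swiotlb. */
static inline dma_addr_t example_bounce_or_fail(struct device *dev,
                struct page *page, phys_addr_t phys, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        if (is_pci_p2pdma_page(page))
                return DMA_MAPPING_ERROR;
        return swiotlb_map(dev, phys, size, dir, attrs);
}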