| author | Christoph Hellwig <hch@lst.de> | 2018-08-20 16:21:10 +0200 |
| --- | --- | --- |
| committer | Christoph Hellwig <hch@lst.de> | 2018-10-19 08:46:58 +0200 |
| commit | c4dae366925f929749b2a26efa53b561904a9a4f (patch) | |
| tree | d963b2c400b43fe7462848d3bf019cc172c59a75 /kernel | |
| parent | 4803b44e68fc08e76f00dec90074d199a11ad6f5 (diff) | |
swiotlb: refactor swiotlb_map_page
Remove the somewhat useless map_single function, and replace it with a
swiotlb_bounce_page handler that handles everything related to actually
bouncing a page.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/dma/swiotlb.c | 67 |
1 file changed, 30 insertions, 37 deletions
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 15755d7a5242..57507b18caa4 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -544,26 +544,6 @@ found:
 }
 
 /*
- * Allocates bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_addr_t start_dma_addr;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE) {
-		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
-				     &phys);
-		return SWIOTLB_MAP_ERROR;
-	}
-
-	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
-	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
-				      dir, attrs);
-}
-
-/*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
@@ -714,6 +694,34 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 	return true;
 }
 
+static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_addr_t dma_addr;
+
+	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
+		dev_warn_ratelimited(dev,
+			"Cannot do DMA to address %pa\n", phys);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	/* Oh well, have to allocate and map a bounce buffer. */
+	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+			*phys, size, dir, attrs);
+	if (*phys == SWIOTLB_MAP_ERROR)
+		return DIRECT_MAPPING_ERROR;
+
+	/* Ensure that the address returned is DMA'ble */
+	dma_addr = __phys_to_dma(dev, *phys);
+	if (unlikely(!dma_capable(dev, dma_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+			attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.  The
  * physical address to use is returned.
@@ -726,7 +734,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    enum dma_data_direction dir,
 			    unsigned long attrs)
 {
-	phys_addr_t map, phys = page_to_phys(page) + offset;
+	phys_addr_t phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 
 	BUG_ON(dir == DMA_NONE);
@@ -739,22 +747,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 		return dev_addr;
 
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
-	/* Oh well, have to allocate and map a bounce buffer. */
-	map = map_single(dev, phys, size, dir, attrs);
-	if (map == SWIOTLB_MAP_ERROR)
-		return DIRECT_MAPPING_ERROR;
-
-	dev_addr = __phys_to_dma(dev, map);
-
-	/* Ensure that the address returned is DMA'ble */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-	return DIRECT_MAPPING_ERROR;
+	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
 }
 
 /*
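For readers who want to trace the new control flow outside the kernel tree, here is a small standalone C sketch that mirrors the order of checks the patch consolidates into swiotlb_bounce_page(): give up immediately when bouncing is disabled, allocate a bounce slot, then verify that the device can actually address the result and undo the mapping if it cannot. Every type, constant, and helper with a _mock suffix below is an illustrative stand-in invented for this sketch, not a real kernel API; only the shape of the logic follows the diff above.

/*
 * Standalone userspace sketch (not kernel code): mirrors the error-handling
 * order of the new swiotlb_bounce_page() helper. All *_mock names and the
 * constants below are simplified stand-ins, not the kernel's real APIs.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

#define DIRECT_MAPPING_ERROR	((dma_addr_t)0)		/* mock error cookie */
#define SWIOTLB_MAP_ERROR	((phys_addr_t)~0ULL)	/* mock "no slot" value */

enum swiotlb_force_mock { NORMAL, FORCE, NO_FORCE };

static enum swiotlb_force_mock swiotlb_force_mock = NORMAL;
static phys_addr_t io_tlb_start_mock = 0x1000000;	/* pretend bounce-pool base */

struct device_mock { uint64_t dma_mask; };		/* only the addressing limit */

/* mock: identity phys -> dma translation */
static dma_addr_t phys_to_dma_mock(struct device_mock *dev, phys_addr_t phys)
{
	(void)dev;
	return phys;
}

/* mock: the device reaches [addr, addr + size) iff it fits under dma_mask */
static bool dma_capable_mock(struct device_mock *dev, dma_addr_t addr, size_t size)
{
	return addr + size - 1 <= dev->dma_mask;
}

/* mock: pretend a bounce slot is always available at the pool base */
static phys_addr_t tlb_map_single_mock(phys_addr_t phys, size_t size)
{
	(void)phys; (void)size;
	return io_tlb_start_mock;
}

/* mock: releasing the slot is a no-op here */
static void tlb_unmap_single_mock(phys_addr_t tlb_addr, size_t size)
{
	(void)tlb_addr; (void)size;
}

/* Same shape as the new helper: bail out, bounce, verify, undo on failure. */
static dma_addr_t bounce_page_mock(struct device_mock *dev, phys_addr_t *phys,
				   size_t size)
{
	dma_addr_t dma_addr;

	if (swiotlb_force_mock == NO_FORCE)	/* bouncing disabled */
		return DIRECT_MAPPING_ERROR;

	*phys = tlb_map_single_mock(*phys, size);	/* allocate + copy step */
	if (*phys == SWIOTLB_MAP_ERROR)
		return DIRECT_MAPPING_ERROR;

	dma_addr = phys_to_dma_mock(dev, *phys);
	if (!dma_capable_mock(dev, dma_addr, size)) {	/* still unreachable: undo */
		tlb_unmap_single_mock(*phys, size);
		return DIRECT_MAPPING_ERROR;
	}
	return dma_addr;
}

int main(void)
{
	struct device_mock dev = { .dma_mask = 0xffffffffULL };	/* 32-bit device */
	phys_addr_t phys = 0x200000000ULL;	/* buffer above the device's reach */
	dma_addr_t dma = bounce_page_mock(&dev, &phys, 4096);

	if (dma == DIRECT_MAPPING_ERROR)
		printf("mapping failed\n");
	else
		printf("bounced to dma addr 0x%llx\n", (unsigned long long)dma);
	return 0;
}

In the actual patch, folding this check sequence into one helper is what lets swiotlb_map_page() end in a single tail call, return swiotlb_bounce_page(dev, &phys, size, dir, attrs), instead of open-coding the map, verify, and unwind steps.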