author     Ingo Molnar <mingo@kernel.org>    2018-04-12 09:42:34 +0200
committer  Ingo Molnar <mingo@kernel.org>    2018-04-12 09:42:34 +0200
commit     ef389b734691cdc8beb009dd402135dcdcb86a56
tree       9523a37db93cb7c7874a5f18b4d9a7014898b814    /lib/swiotlb.c
parent     a774635db5c430cbf21fa5d2f2df3d23aaa8e782
parent     c76fc98260751e71c884dc1a18a07e427ef033b5
Merge branch 'WIP.x86/asm' into x86/urgent, because the topic is ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib/swiotlb.c')
 lib/swiotlb.c | 77
 1 file changed, 17 insertions(+), 60 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c67b06261160..32aacd0d56a8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
 #include <linux/gfp.h>
 #include <linux/scatterlist.h>
 #include <linux/mem_encrypt.h>
+#include <linux/set_memory.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -156,22 +157,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
-
-/* For swiotlb, clear memory encryption mask from dma addresses */
-static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
-				      phys_addr_t address)
-{
-	return __sme_clr(phys_to_dma(hwdev, address));
-}
-
-/* Note that this doesn't work with highmem page */
-static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
-				      volatile void *address)
-{
-	return phys_to_dma(hwdev, virt_to_phys(address));
-}
-
 static bool no_iotlb_memory;
 
 void swiotlb_print_info(void)
@@ -209,12 +194,12 @@ void __init swiotlb_update_mem_attributes(void)
 	vaddr = phys_to_virt(io_tlb_start);
 	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
 
 	vaddr = phys_to_virt(io_tlb_overflow_buffer);
 	bytes = PAGE_ALIGN(io_tlb_overflow);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
 }
@@ -355,7 +340,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_start = virt_to_phys(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
-	swiotlb_set_mem_attributes(tlb, bytes);
+	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
 	memset(tlb, 0, bytes);
 
 	/*
@@ -366,7 +351,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!v_overflow_buffer)
 		goto cleanup2;
 
-	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
+	set_memory_decrypted((unsigned long)v_overflow_buffer,
+			     io_tlb_overflow >> PAGE_SHIFT);
 	memset(v_overflow_buffer, 0, io_tlb_overflow);
 	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
@@ -622,7 +608,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 		return SWIOTLB_MAP_ERROR;
 	}
 
-	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
+	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
 				      dir, attrs);
 }
@@ -706,6 +692,7 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	}
 }
 
+#ifdef CONFIG_DMA_DIRECT_OPS
 static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
 		size_t size)
 {
@@ -726,12 +713,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		goto out_warn;
 
 	phys_addr = swiotlb_tbl_map_single(dev,
-			swiotlb_phys_to_dma(dev, io_tlb_start),
+			__phys_to_dma(dev, io_tlb_start),
 			0, size, DMA_FROM_DEVICE, 0);
 	if (phys_addr == SWIOTLB_MAP_ERROR)
 		goto out_warn;
 
-	*dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+	*dma_handle = __phys_to_dma(dev, phys_addr);
 	if (dma_coherent_ok(dev, *dma_handle, size))
 		goto out_unmap;
@@ -759,28 +746,6 @@ out_warn:
 	return NULL;
 }
 
-void *
-swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-		       dma_addr_t *dma_handle, gfp_t flags)
-{
-	int order = get_order(size);
-	unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
-	void *ret;
-
-	ret = (void *)__get_free_pages(flags, order);
-	if (ret) {
-		*dma_handle = swiotlb_virt_to_bus(hwdev, ret);
-		if (dma_coherent_ok(hwdev, *dma_handle, size)) {
-			memset(ret, 0, size);
-			return ret;
-		}
-		free_pages((unsigned long)ret, order);
-	}
-
-	return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
-}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-
 static bool swiotlb_free_buffer(struct device *dev, size_t size,
 		dma_addr_t dma_addr)
 {
@@ -799,15 +764,7 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 				 DMA_ATTR_SKIP_CPU_SYNC);
 	return true;
 }
-
-void
-swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-		      dma_addr_t dev_addr)
-{
-	if (!swiotlb_free_buffer(hwdev, size, dev_addr))
-		free_pages((unsigned long)vaddr, get_order(size));
-}
-EXPORT_SYMBOL(swiotlb_free_coherent);
+#endif
 
 static void
 swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
@@ -867,10 +824,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
-		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+		return __phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = swiotlb_phys_to_dma(dev, map);
+	dev_addr = __phys_to_dma(dev, map);
 
 	/* Ensure that the address returned is DMA'ble */
 	if (dma_capable(dev, dev_addr, size))
@@ -879,7 +836,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+	return __phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 
 /*
@@ -1009,7 +966,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sg_dma_len(sgl) = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
+			sg->dma_address = __phys_to_dma(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg_dma_len(sg) = sg->length;
@@ -1073,7 +1030,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
+	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 
 /*
@@ -1085,7 +1042,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
 #ifdef CONFIG_DMA_DIRECT_OPS
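For context on the mechanical part of the diff above: the removed local swiotlb_phys_to_dma() helper only cleared the memory-encryption (SME) mask from a translated bus address, i.e. __sme_clr(phys_to_dma(...)), which is the same result the generic __phys_to_dma() gives, so callers switch to it directly; swiotlb_set_mem_attributes() is likewise replaced by the generic set_memory_decrypted(). The following is a minimal user-space sketch of that mask-clearing idea only; SME_MASK, the identity phys_to_dma() stand-in, and the sample address are illustrative assumptions, not the kernel's actual implementation.

	/* sketch.c -- illustrative model of __sme_clr(phys_to_dma(...)); not kernel code */
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed position of the memory-encryption bit; the real mask is CPU-specific. */
	#define SME_MASK (1ULL << 47)

	/* Stand-in for phys_to_dma(): identity translation plus the encryption bit. */
	static uint64_t phys_to_dma(uint64_t phys)
	{
		return phys | SME_MASK;
	}

	/*
	 * What the removed swiotlb_phys_to_dma() computed: a bus address with the
	 * encryption mask cleared, so the (unencrypted) device and the CPU agree
	 * on the bounce-buffer address.
	 */
	static uint64_t swiotlb_bus_addr(uint64_t phys)
	{
		return phys_to_dma(phys) & ~SME_MASK;	/* __sme_clr() equivalent */
	}

	int main(void)
	{
		uint64_t io_tlb_start = 0x100000;	/* sample bounce-buffer base */

		printf("encrypted:   %#llx\n", (unsigned long long)phys_to_dma(io_tlb_start));
		printf("unencrypted: %#llx\n", (unsigned long long)swiotlb_bus_addr(io_tlb_start));
		return 0;
	}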