author     Linus Torvalds <torvalds@linux-foundation.org>  2022-06-06 17:56:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-06-06 17:56:18 -0700
commit     e71e60cd74df9386c3f684c54888f2367050b831 (patch)
tree       a519f612fce0a48658dc3e5de8f9568df8ce8074 /kernel
parent     f2906aa863381afb0015a9eb7fefad885d4e5a56 (diff)
parent     e15db62bc5648ab459a570862f654e787c498faf (diff)
Merge tag 'dma-mapping-5.19-2022-06-06' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
- fix a regression in setting swiotlb ->force_bounce (me)
- make dma-debug less chatty (Rob Clark)
* tag 'dma-mapping-5.19-2022-06-06' of git://git.infradead.org/users/hch/dma-mapping:
swiotlb: fix setting ->force_bounce
dma-debug: make things less spammy under memory pressure
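Editorial note on the first fix: the regression came from the early-init path assigning mem->force_bounce from the SWIOTLB_FORCE flag *after* swiotlb_init_io_tlb_mem() had already set it from the global swiotlb=force override, so the later plain assignment could clobber the earlier value. The fix passes the flags into the helper and computes the field once as an OR of both sources. Below is a minimal standalone C sketch of that overwrite pattern and its fix; it is illustrative only (compressed into two toy functions, not the kernel code).

```c
/*
 * Minimal userspace sketch of the ->force_bounce regression: an
 * unconditional assignment after init overwrites a value that init
 * already set from the global override.  Names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define SWIOTLB_FORCE (1 << 0)

static bool swiotlb_force_bounce = true;	/* e.g. set by swiotlb=force */

struct io_tlb_mem { bool force_bounce; };

/* Broken flow: the init helper sets the flag, the caller then overwrites it. */
static void init_broken(struct io_tlb_mem *mem, unsigned int flags)
{
	if (swiotlb_force_bounce)
		mem->force_bounce = true;
	/* caller's follow-up assignment clobbers 'true' when flags == 0 */
	mem->force_bounce = flags & SWIOTLB_FORCE;
}

/* Fixed flow: the helper combines both sources in one expression. */
static void init_fixed(struct io_tlb_mem *mem, unsigned int flags)
{
	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
}

int main(void)
{
	struct io_tlb_mem a = { 0 }, b = { 0 };

	init_broken(&a, 0);
	init_fixed(&b, 0);
	printf("broken: %d, fixed: %d\n", a.force_bounce, b.force_bounce);
	return 0;
}
```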
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/dma/debug.c   |  2
-rw-r--r--   kernel/dma/swiotlb.c | 14
2 files changed, 7 insertions, 9 deletions
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index ac740630c79c..2caafd13f8aa 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 
 	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
-		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
+		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
 	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		err_printk(entry->dev, entry,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index dfa1de89dc94..cb50f8d38360 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void)
 }
 
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
-		unsigned long nslabs, bool late_alloc)
+		unsigned long nslabs, unsigned int flags, bool late_alloc)
 {
 	void *vaddr = phys_to_virt(start);
 	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	mem->index = 0;
 	mem->late_alloc = late_alloc;
 
-	if (swiotlb_force_bounce)
-		mem->force_bounce = true;
+	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 
 	spin_lock_init(&mem->lock);
 	for (i = 0; i < mem->nslabs; i++) {
@@ -275,8 +274,7 @@ retry:
 		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
 		      __func__, alloc_size, PAGE_SIZE);
 
-	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-	mem->force_bounce = flags & SWIOTLB_FORCE;
+	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
 
 	if (flags & SWIOTLB_VERBOSE)
 		swiotlb_print_info();
@@ -348,7 +346,7 @@ retry:
 
 	set_memory_decrypted((unsigned long)vstart,
 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
-	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
+	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
 
 	swiotlb_print_info();
 	return 0;
@@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 
 		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
 				     rmem->size >> PAGE_SHIFT);
-		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
-		mem->force_bounce = true;
+		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
+					false);
 		mem->for_alloc = true;
 
 		rmem->priv = mem;
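Editorial note on the dma-debug change: under memory pressure, active_cacheline_insert() can fail with -ENOMEM on mapping after mapping, and with plain pr_err() each failure logged the same message; pr_err_once() reports only the first occurrence. A rough userspace sketch of that print-once guard follows; it is not the kernel macro, just the idea behind it.

```c
/*
 * Print-once sketch: a static flag records whether the message has
 * already been emitted, so repeated failures log only the first time.
 */
#include <stdbool.h>
#include <stdio.h>

#define log_err_once(msg)			\
do {						\
	static bool printed;			\
	if (!printed) {				\
		printed = true;			\
		fputs(msg, stderr);		\
	}					\
} while (0)

int main(void)
{
	/* Three simulated -ENOMEM failures, but only one log line. */
	for (int i = 0; i < 3; i++)
		log_err_once("cacheline tracking ENOMEM, tracking disabled\n");
	return 0;
}
```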