author     Petr Tesarik <petr.tesarik.ext@huawei.com>  2023-08-01 08:24:04 +0200
committer  Christoph Hellwig <hch@lst.de>              2023-08-01 18:02:32 +0200
commit     1395706a14904f2593debecf20f827e72d7392a7
tree       196190ece0128e3baafd308d626f6563bf277060 /kernel/dma
parent     1aaa736815eb04f4dae3f0b3e977b2a0677a4cfb
swiotlb: search the software IO TLB only if the device makes use of it
Skip searching the software IO TLB if a device has never used it, making sure these devices are not affected by the introduction of multiple IO TLB memory pools.

An additional memory barrier is required to ensure that the new value of the flag is visible to other CPUs after mapping a new bounce buffer. For efficiency, the flag check should be inlined, and the memory barrier must then be moved to is_swiotlb_buffer(). However, it can replace the existing barrier in swiotlb_find_pool(), because all callers use is_swiotlb_buffer() first to verify that the buffer address belongs to the software IO TLB.

Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
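The inlined flag check described above lives in include/linux/swiotlb.h and is therefore not part of the kernel/dma diffstat below. As a rough illustration only, a minimal sketch of how such a check might look is given here; the dev->dma_uses_io_tlb field and swiotlb_find_pool() come from this series, only the CONFIG_SWIOTLB_DYNAMIC path is shown, and the exact layout of the real header may differ:

static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	if (!mem)
		return false;

	/*
	 * Devices that never mapped a bounce buffer bail out here and
	 * never walk the per-device RCU list of IO TLB pools.
	 *
	 * Pairs with smp_wmb() in swiotlb_find_slots() and
	 * swiotlb_dyn_alloc(), which set the flag and publish new pools.
	 */
	smp_rmb();
	if (!dev->dma_uses_io_tlb)
		return false;

	return swiotlb_find_pool(dev, paddr) != NULL;
}

Without CONFIG_SWIOTLB_DYNAMIC there is only the default pool, so the helper can remain a plain address range check with no flag or barrier.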
Diffstat (limited to 'kernel/dma')
-rw-r--r--  kernel/dma/swiotlb.c | 14
1 file changed, 6 insertions, 8 deletions
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index adf80dec42d7..d7eac84f975b 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -730,7 +730,7 @@ static void swiotlb_dyn_alloc(struct work_struct *work)
 	add_mem_pool(mem, pool);
-	/* Pairs with smp_rmb() in swiotlb_find_pool(). */
+	/* Pairs with smp_rmb() in is_swiotlb_buffer(). */
 	smp_wmb();
 }
@@ -764,11 +764,6 @@ struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	struct io_tlb_pool *pool;
-	/* Pairs with smp_wmb() in swiotlb_find_slots() and
-	 * swiotlb_dyn_alloc(), which modify the RCU lists.
-	 */
-	smp_rmb();
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(pool, &mem->pools, node) {
 		if (paddr >= pool->start && paddr < pool->end)
@@ -813,6 +808,7 @@ void swiotlb_dev_init(struct device *dev)
 #ifdef CONFIG_SWIOTLB_DYNAMIC
 	INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
 	spin_lock_init(&dev->dma_io_tlb_lock);
+	dev->dma_uses_io_tlb = false;
 #endif
 }
@@ -1157,9 +1153,11 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
 	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
-	/* Pairs with smp_rmb() in swiotlb_find_pool(). */
-	smp_wmb();
 found:
+	dev->dma_uses_io_tlb = true;
+	/* Pairs with smp_rmb() in is_swiotlb_buffer() */
+	smp_wmb();
+
 	*retpool = pool;
 	return index;
 }