author	Petr Tesarik <petr.tesarik.ext@huawei.com>	2023-08-01 08:24:03 +0200
committer	Christoph Hellwig <hch@lst.de>	2023-08-01 18:02:27 +0200
commit	1aaa736815eb04f4dae3f0b3e977b2a0677a4cfb (patch)
tree	49a199f7e78ee1556f97e08ccde26ad7d86c67c4 /include/linux/swiotlb.h
parent	ad96ce3252dbab773cb343220662df3d84dd8e80 (diff)
swiotlb: allocate a new memory pool when existing pools are full
When swiotlb_find_slots() cannot find suitable slots, schedule the allocation of a new memory pool. It is not possible to allocate the pool immediately, because this code may run in interrupt context, which is not suitable for large memory allocations. This means that the memory pool will be available too late for the currently requested mapping, but the stress on the software IO TLB allocator is likely to continue, and subsequent allocations will benefit from the additional pool eventually.

Keep all memory pools for an allocator in an RCU list to avoid locking on the read side. For modifications, add a new spinlock to struct io_tlb_mem.

The spinlock also protects updates to the total number of slabs (nslabs in struct io_tlb_mem), but not reads of the value. Readers may therefore encounter a stale value, but this is not an issue:

- swiotlb_tbl_map_single() and is_swiotlb_active() only check for a non-zero value. This is ensured by the existence of the default memory pool, allocated at boot.

- The exact value is used only for non-critical purposes (debugfs, kernel messages).

Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
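To illustrate the scheme described above, here is a minimal sketch (not the actual kernel/dma/swiotlb.c code): the per-pool helper pool_find_slots() and the pool->node list member are assumptions made for this example. The read side walks the pool list under RCU and, when every pool is full, defers growth to process context via the new dyn_alloc work:

#include <linux/rculist.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>

/* Hypothetical per-pool slot search; stubbed out here, not a real swiotlb API. */
static int pool_find_slots(struct io_tlb_pool *pool, size_t alloc_size)
{
	/* Placeholder: a real implementation would search the pool's slots. */
	return -1;
}

static int example_find_slots(struct io_tlb_mem *mem, size_t alloc_size)
{
	struct io_tlb_pool *pool;
	int index = -1;

	/* Lockless read side: pools are published on an RCU list. */
	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node) {
		index = pool_find_slots(pool, alloc_size);
		if (index >= 0)
			break;
	}
	rcu_read_unlock();

	/*
	 * This path may run in interrupt context, so a new pool cannot be
	 * allocated here; schedule the allocation work instead.
	 */
	if (index < 0)
		schedule_work(&mem->dyn_alloc);

	return index;
}

Because the read side takes no lock, a pool added concurrently simply becomes visible on a later walk; similarly, nslabs may be read stale, which the commit message explains is harmless.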
Diffstat (limited to 'include/linux/swiotlb.h')
-rw-r--r--	include/linux/swiotlb.h	8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 9825fa14abe4..8371c92a0271 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -8,6 +8,7 @@
 #include <linux/types.h>
 #include <linux/limits.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 
 struct device;
 struct page;
@@ -104,12 +105,16 @@ struct io_tlb_pool {
 /**
  * struct io_tlb_mem - Software IO TLB allocator
  * @defpool:	Default (initial) IO TLB memory pool descriptor.
+ * @pool:	IO TLB memory pool descriptor (if not dynamic).
  * @nslabs:	Total number of IO TLB slabs in all pools.
  * @debugfs:	The dentry to debugfs.
  * @force_bounce: %true if swiotlb bouncing is forced
  * @for_alloc:	%true if the pool is used for memory allocation
  * @can_grow:	%true if more pools can be allocated dynamically.
  * @phys_limit:	Maximum allowed physical address.
+ * @lock:	Lock to synchronize changes to the list.
+ * @pools:	List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc:	Dynamic IO TLB pool allocation work.
  * @total_used:	The total number of slots in the pool that are currently used
  *		across all areas. Used only for calculating used_hiwater in
  *		debugfs.
@@ -125,6 +130,9 @@ struct io_tlb_mem {
 #ifdef CONFIG_SWIOTLB_DYNAMIC
 	bool can_grow;
 	u64 phys_limit;
+	spinlock_t lock;
+	struct list_head pools;
+	struct work_struct dyn_alloc;
 #endif
 #ifdef CONFIG_DEBUG_FS
 	atomic_long_t total_used;
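For completeness, a sketch of the corresponding write side under the same assumptions (the example_* names and the pool->node and pool->nslabs members are illustrative, not part of this patch): the new members are initialized once, and the worker, running in process context, publishes a freshly allocated pool on the RCU list and updates nslabs under mem->lock.

#include <linux/gfp.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>

/* Hypothetical pool constructor; the real allocator lives in kernel/dma/swiotlb.c. */
static struct io_tlb_pool *example_alloc_pool(struct io_tlb_mem *mem, gfp_t gfp)
{
	/* Placeholder only: always fails in this sketch. */
	return NULL;
}

static void example_grow_worker(struct work_struct *work)
{
	struct io_tlb_mem *mem = container_of(work, struct io_tlb_mem, dyn_alloc);
	struct io_tlb_pool *pool;

	/* Runs in process context, so a large allocation is acceptable here. */
	pool = example_alloc_pool(mem, GFP_KERNEL);
	if (!pool)
		return;

	spin_lock(&mem->lock);
	list_add_rcu(&pool->node, &mem->pools);	/* publish to lockless readers */
	mem->nslabs += pool->nslabs;		/* writes serialized by mem->lock */
	spin_unlock(&mem->lock);
}

static void example_init_dynamic(struct io_tlb_mem *mem)
{
	spin_lock_init(&mem->lock);
	INIT_LIST_HEAD(&mem->pools);
	INIT_WORK(&mem->dyn_alloc, example_grow_worker);
}

In this sketch only process-context writers take mem->lock, so a plain spin_lock() is used; readers rely on RCU alone, matching the locking rules stated in the commit message.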