author:    David S. Miller <davem@davemloft.net>  2015-04-16 12:45:18 -0700
committer: David S. Miller <davem@davemloft.net>  2015-04-16 12:45:18 -0700
commit:    a83f5d6a11ced6f43ec467a95fe8cc76e1c96cfd
tree:      d3bc9387c3d86187c870e967e887513c61d156b4  /lib/iommu-common.c
parent:    497a5df7bf6ffd136ae21c49d1a01292930d7ca2
parent:    671d773297969bebb1732e1cdc1ec03aa53c6be2
Merge branch 'generic-iommu-allocator'
Sowmini Varadhan says:
====================
Generic IOMMU pooled allocator
Investigation of network performance on Sparc shows a high
degree of locking contention in the IOMMU allocator, and it
was noticed that the PowerPC code has a better locking model.
This patch series tries to extract the generic parts of the
PowerPC code so that it can be shared across multiple PCI
devices and architectures.
v10: resend patch v9 without RFC tag and with a new mail Message-Id
(the previous non-RFC attempt did not show up on the patchwork queue?)
Full revision history below:
v2 changes:
- incorporate David Miller editorial comments: sparc-specific
fields moved from iommu-common into sparc's iommu_64.h
- make the npools value an input parameter, for the case when
the iommu map size is not very large
- cookie_to_index mapping, and optimizations for the span-boundary
check, for use cases such as LDC.
v3: eliminate iommu_sparc, rearrange the ->demap indirection to
be invoked under the pool lock.
v4: David Miller review changes:
- s/IOMMU_ERROR_CODE/DMA_ERROR_CODE
- page_table_map_base and page_table_shift are unsigned long, not u32.
v5: removed ->cookie_to_index and ->demap indirection from the
iommu_tbl_ops. The caller needs to call these functions as needed,
before invoking the generic arena allocator functions.
Added the "skip_span_boundary" argument to iommu_tbl_pool_init() for
those callers like LDC which do no care about span boundary checks.
v6: removed iommu_tbl_ops, and instead pass ->flush_all as
an indirection to iommu_tbl_pool_init(); only invoke ->flush_all
when there is no large_pool, based on the assumption that large-pool
usage is infrequent.
v7: moved pool_hash initialization to lib/iommu-common.c and cleaned up
code duplication from sun4v/sun4u/ldc.
v8: Addresses BenH comments with one exception: I've left the
IOMMU_POOL_HASH as is, so that powerpc can tailor it to its
convenience. Discarded the trylock in favor of a simple spin_lock to
acquire a pool.
v9: Addresses latest BenH comments: need_flush checks, added support
for dma mask and align_order.
v10: resend without RFC tag, and new mail Message-Id.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
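
The central change described in the cover letter is the locking model: instead of one lock over the whole IOMMU map, the map is carved into pools, each with its own spinlock, and an allocator whose "home" pool is busy rotates to the next pool with a trylock rather than spinning. The following is an editorial user-space sketch of that pattern, not code from the patch; `NPOOLS` and `lock_some_pool()` are made-up names, and the real kernel code (below) uses `spin_trylock_irqsave()` on per-pool `iommu_pool` locks.

```c
/* Editorial sketch of the per-pool trylock-rotation pattern; the
 * names NPOOLS and lock_some_pool() are illustrative, not from the
 * patch.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NPOOLS 16	/* power of two, so the mask below works */

static pthread_mutex_t pool_lock[NPOOLS] = {
	[0 ... NPOOLS - 1] = PTHREAD_MUTEX_INITIALIZER	/* GCC range init */
};

/* Hash to a "home" pool, then rotate until some pool's lock is free,
 * mirroring the spin_trylock_irqsave() loop in iommu_tbl_range_alloc().
 */
static unsigned int lock_some_pool(unsigned int pool_hash)
{
	unsigned int pool_nr = pool_hash & (NPOOLS - 1);

	while (pthread_mutex_trylock(&pool_lock[pool_nr]) != 0)
		pool_nr = (pool_nr + 1) & (NPOOLS - 1);
	return pool_nr;		/* caller must unlock this pool */
}

int main(void)
{
	unsigned int nr = lock_some_pool(0xdeadbeefu);

	printf("acquired pool %u\n", nr);
	pthread_mutex_unlock(&pool_lock[nr]);
	return 0;
}
```

With many CPUs mapping buffers concurrently, contention is spread across NPOOLS locks instead of serializing on a single allocator lock, which is the contention the cover letter measured on Sparc.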
Diffstat (limited to 'lib/iommu-common.c')
-rw-r--r--  lib/iommu-common.c  220
1 file changed, 220 insertions(+), 0 deletions(-)
```diff
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
new file mode 100644
index 000000000000..7583f9b7846b
--- /dev/null
+++ b/lib/iommu-common.c
@@ -0,0 +1,220 @@
+/*
+ * IOMMU mmap management and range allocation functions.
+ * Based almost entirely upon the powerpc iommu allocator.
+ */
+
+#include <linux/export.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/iommu-helper.h>
+#include <linux/iommu-common.h>
+#include <linux/dma-mapping.h>
+
+#define IOMMU_LARGE_ALLOC	15
+
+/*
+ * Initialize iommu_pool entries for the iommu_table. `num_entries'
+ * is the number of table entries. If `large_pool' is set to true,
+ * the top 1/4 of the table will be set aside for pool allocations
+ * of more than IOMMU_LARGE_ALLOC pages.
+ */
+extern void iommu_tbl_pool_init(struct iommu_table *iommu,
+				unsigned long num_entries,
+				u32 page_table_shift,
+				const struct iommu_tbl_ops *iommu_tbl_ops,
+				bool large_pool, u32 npools)
+{
+	unsigned int start, i;
+	struct iommu_pool *p = &(iommu->large_pool);
+
+	if (npools == 0)
+		iommu->nr_pools = IOMMU_NR_POOLS;
+	else
+		iommu->nr_pools = npools;
+	BUG_ON(npools > IOMMU_NR_POOLS);
+
+	iommu->page_table_shift = page_table_shift;
+	iommu->iommu_tbl_ops = iommu_tbl_ops;
+	start = 0;
+	if (large_pool)
+		iommu->flags |= IOMMU_HAS_LARGE_POOL;
+
+	if (!large_pool)
+		iommu->poolsize = num_entries/iommu->nr_pools;
+	else
+		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
+	for (i = 0; i < iommu->nr_pools; i++) {
+		spin_lock_init(&(iommu->arena_pool[i].lock));
+		iommu->arena_pool[i].start = start;
+		iommu->arena_pool[i].hint = start;
+		start += iommu->poolsize; /* start for next pool */
+		iommu->arena_pool[i].end = start - 1;
+	}
+	if (!large_pool)
+		return;
+	/* initialize large_pool */
+	spin_lock_init(&(p->lock));
+	p->start = start;
+	p->hint = p->start;
+	p->end = num_entries;
+}
+EXPORT_SYMBOL(iommu_tbl_pool_init);
+
+unsigned long iommu_tbl_range_alloc(struct device *dev,
+				struct iommu_table *iommu,
+				unsigned long npages,
+				unsigned long *handle,
+				unsigned int pool_hash)
+{
+	unsigned long n, end, start, limit, boundary_size;
+	struct iommu_pool *arena;
+	int pass = 0;
+	unsigned int pool_nr;
+	unsigned int npools = iommu->nr_pools;
+	unsigned long flags;
+	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
+	bool largealloc = (large_pool && npages > IOMMU_LARGE_ALLOC);
+	unsigned long shift;
+
+	/* Sanity check */
+	if (unlikely(npages == 0)) {
+		printk_ratelimited("npages == 0\n");
+		return DMA_ERROR_CODE;
+	}
+
+	if (largealloc) {
+		arena = &(iommu->large_pool);
+		spin_lock_irqsave(&arena->lock, flags);
+		pool_nr = 0; /* to keep compiler happy */
+	} else {
+		/* pick out pool_nr */
+		pool_nr = pool_hash & (npools - 1);
+		arena = &(iommu->arena_pool[pool_nr]);
+
+		/* find first available unlocked pool */
+		while (!spin_trylock_irqsave(&(arena->lock), flags)) {
+			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
+			arena = &(iommu->arena_pool[pool_nr]);
+		}
+	}
+
+ again:
+	if (pass == 0 && handle && *handle &&
+	    (*handle >= arena->start) && (*handle < arena->end))
+		start = *handle;
+	else
+		start = arena->hint;
+
+	limit = arena->end;
+
+	/* The case below can happen if we have a small segment appended
+	 * to a large, or when the previous alloc was at the very end of
+	 * the available space. If so, go back to the beginning and flush.
+	 */
+	if (start >= limit) {
+		start = arena->start;
+		if (iommu->iommu_tbl_ops->reset != NULL)
+			iommu->iommu_tbl_ops->reset(iommu);
+	}
+
+	if (dev)
+		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				      1 << iommu->page_table_shift);
+	else
+		boundary_size = ALIGN(1UL << 32, 1 << iommu->page_table_shift);
+
+	shift = iommu->page_table_map_base >> iommu->page_table_shift;
+	boundary_size = boundary_size >> iommu->page_table_shift;
+	/*
+	 * if the iommu has a non-trivial cookie <-> index mapping, we set
+	 * things up so that iommu_is_span_boundary() merely checks if the
+	 * (index + npages) < num_tsb_entries
+	 */
+	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
+		shift = 0;
+		boundary_size = iommu->poolsize * iommu->nr_pools;
+	}
+	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
+			     boundary_size, 0);
+	if (n == -1) {
+		if (likely(pass == 0)) {
+			/* First failure, rescan from the beginning. */
+			arena->hint = arena->start;
+			if (iommu->iommu_tbl_ops->reset != NULL)
+				iommu->iommu_tbl_ops->reset(iommu);
+			pass++;
+			goto again;
+		} else if (!largealloc && pass <= iommu->nr_pools) {
+			spin_unlock(&(arena->lock));
+			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
+			arena = &(iommu->arena_pool[pool_nr]);
+			while (!spin_trylock(&(arena->lock))) {
+				pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
+				arena = &(iommu->arena_pool[pool_nr]);
+			}
+			arena->hint = arena->start;
+			pass++;
+			goto again;
+		} else {
+			/* give up */
+			spin_unlock_irqrestore(&(arena->lock), flags);
+			return DMA_ERROR_CODE;
+		}
+	}
+
+	end = n + npages;
+
+	arena->hint = end;
+
+	/* Update handle for SG allocations */
+	if (handle)
+		*handle = end;
+	spin_unlock_irqrestore(&(arena->lock), flags);
+
+	return n;
+}
+EXPORT_SYMBOL(iommu_tbl_range_alloc);
+
+static struct iommu_pool *get_pool(struct iommu_table *tbl,
+				   unsigned long entry)
+{
+	struct iommu_pool *p;
+	unsigned long largepool_start = tbl->large_pool.start;
+	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
+
+	/* The large pool is the last pool at the top of the table */
+	if (large_pool && entry >= largepool_start) {
+		p = &tbl->large_pool;
+	} else {
+		unsigned int pool_nr = entry / tbl->poolsize;
+
+		BUG_ON(pool_nr >= tbl->nr_pools);
+		p = &tbl->arena_pool[pool_nr];
+	}
+	return p;
+}
+
+void iommu_tbl_range_free(struct iommu_table *iommu, u64 dma_addr,
+			  unsigned long npages, bool do_demap, void *demap_arg)
+{
+	unsigned long entry;
+	struct iommu_pool *pool;
+	unsigned long flags;
+	unsigned long shift = iommu->page_table_shift;
+
+	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
+		entry = (*iommu->iommu_tbl_ops->cookie_to_index)(dma_addr,
+								 demap_arg);
+	} else {
+		entry = (dma_addr - iommu->page_table_map_base) >> shift;
+	}
+	pool = get_pool(iommu, entry);
+
+	spin_lock_irqsave(&(pool->lock), flags);
+	if (do_demap && iommu->iommu_tbl_ops->demap != NULL)
+		(*iommu->iommu_tbl_ops->demap)(demap_arg, entry, npages);
+
+	bitmap_clear(iommu->map, entry, npages);
+	spin_unlock_irqrestore(&(pool->lock), flags);
+}
+EXPORT_SYMBOL(iommu_tbl_range_free);
```
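
For orientation, a hypothetical caller of the three exported entry points might look like the sketch below. Only the `iommu_tbl_pool_init()`, `iommu_tbl_range_alloc()`, and `iommu_tbl_range_free()` signatures and the `->reset`/`->cookie_to_index`/`->demap` op names come from the file above; the wrapper functions, the ops instance, and the page-table shift of 13 are illustrative assumptions, since the real sun4u/sun4v/LDC call sites are not part of this diff.

```c
/* Hypothetical caller sketch; only the iommu_tbl_* signatures and the
 * op names are taken from lib/iommu-common.c above, the rest is assumed.
 */
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>

static void my_reset(struct iommu_table *iommu)
{
	/* arch hook: flush hardware IOTLB state after a full rescan */
}

static const struct iommu_tbl_ops my_tbl_ops = {
	.reset = my_reset,
	/* .cookie_to_index and .demap left NULL: dma_addr maps to a
	 * table index directly via page_table_map_base and the shift.
	 */
};

static void my_init(struct iommu_table *tbl, unsigned long num_entries)
{
	/* NB: iommu_tbl_pool_init() only sets up the pools; the caller
	 * is expected to have populated tbl->map (the allocation bitmap)
	 * and tbl->page_table_map_base separately.  Shift 13 (8K IOMMU
	 * pages) is an assumption here.
	 */
	iommu_tbl_pool_init(tbl, num_entries, 13, &my_tbl_ops,
			    false /* no large pool */, 0 /* default npools */);
}

static u64 my_map(struct iommu_table *tbl, struct device *dev,
		  unsigned long npages, unsigned int pool_hash)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, pool_hash);
	if (entry == DMA_ERROR_CODE)
		return DMA_ERROR_CODE;
	/* trivial cookie mapping: base plus index scaled by page size */
	return tbl->page_table_map_base + (entry << tbl->page_table_shift);
}

static void my_unmap(struct iommu_table *tbl, u64 dma_addr,
		     unsigned long npages)
{
	/* no ->demap hook in this sketch, so do_demap is false */
	iommu_tbl_range_free(tbl, dma_addr, npages, false, NULL);
}
```

The `do_demap` flag lets a caller that has already torn down its mappings (or has no `->demap` hook at all) skip the per-entry demap callback and just clear the bitmap under the owning pool's lock.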