author	Christoph Hellwig <hch@lst.de>	2022-03-14 08:02:57 +0100
committer	Christoph Hellwig <hch@lst.de>	2022-04-18 07:21:13 +0200
commit	7374153d294eb51de5a81ac38ff1c4fef8927bec (patch)
tree	d16ca1fc86fa89880cd0e9482118b5faf58ad403 /kernel/dma
parent	742519538e6b07250c8085bbff4bd358bc03bf16 (diff)
swiotlb: provide swiotlb_init variants that remap the buffer
To share more code between swiotlb and xen-swiotlb, offer a swiotlb_init_remap interface and add a remap callback to swiotlb_init_late that will allow Xen to remap the buffer without duplicating much of the logic.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
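[Editor's note: for orientation, a sketch of the intended calling pattern follows. The names my_remap() and my_arch_swiotlb_init() are hypothetical stand-ins for the Xen-side code, which is not part of this patch; only swiotlb_init_remap(), swiotlb_init(), and SWIOTLB_ANY come from the tree.]

	/* Hypothetical remap callback: return 0 on success, or a negative
	 * value to make swiotlb_init_remap() free the buffer, halve the
	 * slab count, and retry the allocation (see the hunks below). */
	static int __init my_remap(void *tlb, unsigned long nslabs)
	{
		/* e.g. hand the pages to the hypervisor and map them back */
		return 0;
	}

	void __init my_arch_swiotlb_init(void)
	{
		swiotlb_init_remap(true, SWIOTLB_ANY, my_remap);
		/* plain swiotlb_init() is now just the remap == NULL case */
	}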
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/swiotlb.c	36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 119187afc65e..f6acfc7a41bf 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -256,9 +256,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
-	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
+	unsigned long nslabs = default_nslabs;
+	size_t bytes;
 	void *tlb;
 
 	if (!addressing_limit && !swiotlb_force_bounce)
@@ -271,12 +273,23 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 	 * allow to pick a location everywhere for hypervisors with guest
 	 * memory encryption.
 	 */
+retry:
+	bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
 	if (flags & SWIOTLB_ANY)
 		tlb = memblock_alloc(bytes, PAGE_SIZE);
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb)
 		goto fail;
+	if (remap && remap(tlb, nslabs) < 0) {
+		memblock_free(tlb, PAGE_ALIGN(bytes));
+
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			panic("%s: Failed to remap %zu bytes\n",
+			      __func__, bytes);
+		goto retry;
+	}
 	if (swiotlb_init_with_tbl(tlb, nslabs, flags))
 		goto fail_free_mem;
 	return;
@@ -287,12 +300,18 @@ fail:
pr_warn("Cannot allocate buffer");
}
+void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+{
+ return swiotlb_init_remap(addressing_limit, flags, NULL);
+}
+
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
-int swiotlb_init_late(size_t size, gfp_t gfp_mask)
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ int (*remap)(void *tlb, unsigned long nslabs))
{
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
unsigned long bytes;
@@ -303,6 +322,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 	if (swiotlb_force_disable)
 		return 0;
 
+retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -323,6 +343,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 			(PAGE_SIZE << order) >> 20);
 		nslabs = SLABS_PER_PAGE << order;
 	}
+	if (remap)
+		rc = remap(vstart, nslabs);
+	if (rc) {
+		free_pages((unsigned long)vstart, order);
+
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			return rc;
+		goto retry;
+	}
 	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
 	if (rc)
 		free_pages((unsigned long)vstart, order);
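[Editor's note: both retry paths shrink the buffer the same way: halve nslabs, re-align it to IO_TLB_SEGSIZE, and bail out once the count falls below IO_TLB_MIN_SLABS. A minimal userspace sketch of that arithmetic, assuming constant values matching the swiotlb headers of this period (2 KB slabs, 128-slab segments, 1 MB floor) and the 64 MB default buffer size:]

	#include <stdio.h>

	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
	#define IO_TLB_SHIFT		11	/* 2 KB per slab */
	#define IO_TLB_SEGSIZE		128
	#define IO_TLB_MIN_SLABS	((1 << 20) >> IO_TLB_SHIFT)	/* 1 MB floor */

	int main(void)
	{
		unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT; /* 64 MB default */

		/* Mirror the retry loops above: try, then halve and re-align
		 * until the buffer would drop below the minimum size. */
		while (nslabs >= IO_TLB_MIN_SLABS) {
			printf("try %lu slabs (%lu KB)\n",
			       nslabs, (nslabs << IO_TLB_SHIFT) >> 10);
			nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		}
		return 0;
	}

With the 64 MB default this attempts 64 MB, 32 MB, and so on down to 1 MB; below that, swiotlb_init_remap() panics while swiotlb_init_late() returns the remap error to its caller.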