author    Linus Torvalds <torvalds@linux-foundation.org>  2022-05-25 19:18:36 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-05-25 19:18:36 -0700
commit    3f306ea2e18568f693f7763d1c2178f349ae8f31 (patch)
tree      c44968429a363f73452480a4a083dfdb4bdc1cd9 /drivers
parent    fbe86daca0ba878b04fa241b85e26e54d17d4229 (diff)
parent    4a37f3dd9a83186cb88d44808ab35b78375082c9 (diff)
Merge tag 'dma-mapping-5.19-2022-05-25' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - don't over-decrypt memory (Robin Murphy)

 - take the min align mask into account for the swiotlb max mapping
   size (Tianyu Lan)

 - use GFP_ATOMIC in dma-debug (Mikulas Patocka)

 - fix DMA_ATTR_NO_KERNEL_MAPPING on xen/arm (me)

 - don't fail on highmem CMA pages in dma_direct_alloc_pages (me)

 - cleanup swiotlb initialization and share more code with swiotlb-xen
   (me, Stefano Stabellini); a usage sketch of the new interface
   follows the commit list below

* tag 'dma-mapping-5.19-2022-05-25' of git://git.infradead.org/users/hch/dma-mapping: (23 commits)
  dma-direct: don't over-decrypt memory
  swiotlb: max mapping size takes min align mask into account
  swiotlb: use the right nslabs-derived sizes in swiotlb_init_late
  swiotlb: use the right nslabs value in swiotlb_init_remap
  swiotlb: don't panic when the swiotlb buffer can't be allocated
  dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATIOMIC
  dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages
  swiotlb-xen: fix DMA_ATTR_NO_KERNEL_MAPPING on arm
  x86: remove cruft from <asm/dma-mapping.h>
  swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl
  swiotlb: merge swiotlb-xen initialization into swiotlb
  swiotlb: provide swiotlb_init variants that remap the buffer
  swiotlb: pass a gfp_mask argument to swiotlb_init_late
  swiotlb: add a SWIOTLB_ANY flag to lift the low memory restriction
  swiotlb: make the swiotlb_init interface more useful
  x86: centralize setting SWIOTLB_FORCE when guest memory encryption is enabled
  x86: remove the IOMMU table infrastructure
  MIPS/octeon: use swiotlb_init instead of open coding it
  arm/xen: don't check for xen_initial_domain() in xen_create_contiguous_region
  swiotlb: rename swiotlb_late_init_with_default_size
  ...
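The headline item is the consolidated swiotlb_init interface. A minimal
sketch of how an architecture's early setup code would use it, assuming
the 5.19 prototype swiotlb_init(bool addressing_limit, unsigned int flags)
and the SWIOTLB_VERBOSE/SWIOTLB_FORCE/SWIOTLB_ANY flags; the function name
arch_example_mem_init() and the predicate guest_memory_is_encrypted() are
placeholders, not real kernel symbols:

	#include <linux/swiotlb.h>

	/* Sketch only: arch_example_mem_init() and
	 * guest_memory_is_encrypted() are hypothetical. */
	void __init arch_example_mem_init(void)
	{
		unsigned int flags = SWIOTLB_VERBOSE;

		/* Encrypted guests must bounce all DMA through the
		 * swiotlb; x86 now centralizes setting this flag. */
		if (guest_memory_is_encrypted())
			flags |= SWIOTLB_FORCE;

		/* true: devices with addressing limits may be present,
		 * so allocate the bounce buffer, in low memory unless
		 * SWIOTLB_ANY is also passed. */
		swiotlb_init(true, flags);
	}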
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/iommu/amd/init.c      6
-rw-r--r--  drivers/iommu/amd/iommu.c     5
-rw-r--r--  drivers/iommu/intel/dmar.c    6
-rw-r--r--  drivers/xen/swiotlb-xen.c   231
4 files changed, 41 insertions, 207 deletions
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index b4a798c7b347..1a3ad58ba846 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -27,7 +27,6 @@
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
-#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>
@@ -3257,11 +3256,6 @@ __setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);
-IOMMU_INIT_FINISH(amd_iommu_detect,
- gart_iommu_hole_init,
- NULL,
- NULL);
-
bool amd_iommu_v2_supported(void)
{
return amd_iommu_v2_present;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index a1ada7bff44e..b47220ac09ea 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1840,7 +1840,10 @@ void amd_iommu_domain_update(struct protection_domain *domain)
static void __init amd_iommu_init_dma_ops(void)
{
- swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
+ if (iommu_default_passthrough() || sme_me_mask)
+ x86_swiotlb_enable = true;
+ else
+ x86_swiotlb_enable = false;
}
int __init amd_iommu_init_api(void)
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 4de960834a1b..592c1e1a5d4b 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -30,7 +30,6 @@
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
-#include <asm/iommu_table.h>
#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
@@ -912,7 +911,7 @@ dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
return 0;
}
-int __init detect_intel_iommu(void)
+void __init detect_intel_iommu(void)
{
int ret;
struct dmar_res_callback validate_drhd_cb = {
@@ -945,8 +944,6 @@ int __init detect_intel_iommu(void)
dmar_tbl = NULL;
}
up_write(&dmar_global_lock);
-
- return ret ? ret : 1;
}
static void unmap_iommu(struct intel_iommu *iommu)
@@ -2164,7 +2161,6 @@ static int __init dmar_free_unused_resources(void)
}
late_initcall(dmar_free_unused_resources);
-IOMMU_INIT_POST(detect_intel_iommu);
/*
* DMAR Hotplug Support
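With the linker-table entries gone from both drivers (IOMMU_INIT_FINISH
and IOMMU_INIT_POST above), x86 invokes the detection routines directly
from its DMA setup path. A rough sketch of that direct-call shape,
modeled on arch/x86/kernel/pci-dma.c in 5.19; the ordering and the
x86_swiotlb_enable/x86_swiotlb_flags names are recalled from that file,
not quoted verbatim:

	/* Illustrative condensation, not the verbatim upstream function. */
	void __init pci_iommu_alloc(void)
	{
		/* Each detect routine is a plain call in a fixed order,
		 * replacing discovery and sorting of the old
		 * .iommu_table section. */
		gart_iommu_hole_init();
		amd_iommu_detect();
		detect_intel_iommu();	/* now returns void, per the hunk above */

		/* swiotlb setup is driven by flags the calls above set */
		swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
	}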
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 47aebd98f52f..67aa74d20162 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -36,7 +36,6 @@
#include <xen/hvc-console.h>
#include <asm/dma-mapping.h>
-#include <asm/xen/page-coherent.h>
#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
@@ -104,7 +103,8 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
return 0;
}
-static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+#ifdef CONFIG_X86
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
int rc;
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -130,223 +130,59 @@ static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
return 0;
}
-enum xen_swiotlb_err {
- XEN_SWIOTLB_UNKNOWN = 0,
- XEN_SWIOTLB_ENOMEM,
- XEN_SWIOTLB_EFIXUP
-};
-
-static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
-{
- switch (err) {
- case XEN_SWIOTLB_ENOMEM:
- return "Cannot allocate Xen-SWIOTLB buffer\n";
- case XEN_SWIOTLB_EFIXUP:
- return "Failed to get contiguous memory for DMA from Xen!\n"\
- "You either: don't have the permissions, do not have"\
- " enough free memory under 4GB, or the hypervisor memory"\
- " is too fragmented!";
- default:
- break;
- }
- return "";
-}
-
-int xen_swiotlb_init(void)
-{
- enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
- unsigned long bytes = swiotlb_size_or_default();
- unsigned long nslabs = bytes >> IO_TLB_SHIFT;
- unsigned int order, repeat = 3;
- int rc = -ENOMEM;
- char *start;
-
- if (io_tlb_default_mem.nslabs) {
- pr_warn("swiotlb buffer already initialized\n");
- return -EEXIST;
- }
-
-retry:
- m_ret = XEN_SWIOTLB_ENOMEM;
- order = get_order(bytes);
-
- /*
- * Get IO TLB memory from any location.
- */
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
- while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- start = (void *)xen_get_swiotlb_free_pages(order);
- if (start)
- break;
- order--;
- }
- if (!start)
- goto exit;
- if (order != get_order(bytes)) {
- pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
- (PAGE_SIZE << order) >> 20);
- nslabs = SLABS_PER_PAGE << order;
- bytes = nslabs << IO_TLB_SHIFT;
- }
-
- /*
- * And replace that memory with pages under 4GB.
- */
- rc = xen_swiotlb_fixup(start, nslabs);
- if (rc) {
- free_pages((unsigned long)start, order);
- m_ret = XEN_SWIOTLB_EFIXUP;
- goto error;
- }
- rc = swiotlb_late_init_with_tbl(start, nslabs);
- if (rc)
- return rc;
- swiotlb_set_max_segment(PAGE_SIZE);
- return 0;
-error:
- if (nslabs > 1024 && repeat--) {
- /* Min is 2MB */
- nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
- bytes = nslabs << IO_TLB_SHIFT;
- pr_info("Lowering to %luMB\n", bytes >> 20);
- goto retry;
- }
-exit:
- pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- return rc;
-}
-
-#ifdef CONFIG_X86
-void __init xen_swiotlb_init_early(void)
-{
- unsigned long bytes = swiotlb_size_or_default();
- unsigned long nslabs = bytes >> IO_TLB_SHIFT;
- unsigned int repeat = 3;
- char *start;
- int rc;
-
-retry:
- /*
- * Get IO TLB memory from any location.
- */
- start = memblock_alloc(PAGE_ALIGN(bytes),
- IO_TLB_SEGSIZE << IO_TLB_SHIFT);
- if (!start)
- panic("%s: Failed to allocate %lu bytes\n",
- __func__, PAGE_ALIGN(bytes));
-
- /*
- * And replace that memory with pages under 4GB.
- */
- rc = xen_swiotlb_fixup(start, nslabs);
- if (rc) {
- memblock_free(start, PAGE_ALIGN(bytes));
- if (nslabs > 1024 && repeat--) {
- /* Min is 2MB */
- nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
- bytes = nslabs << IO_TLB_SHIFT;
- pr_info("Lowering to %luMB\n", bytes >> 20);
- goto retry;
- }
- panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
- }
-
- if (swiotlb_init_with_tbl(start, nslabs, true))
- panic("Cannot allocate SWIOTLB buffer");
- swiotlb_set_max_segment(PAGE_SIZE);
-}
-#endif /* CONFIG_X86 */
-
static void *
-xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
+xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
- void *ret;
+ u64 dma_mask = dev->coherent_dma_mask;
int order = get_order(size);
- u64 dma_mask = DMA_BIT_MASK(32);
phys_addr_t phys;
- dma_addr_t dev_addr;
-
- /*
- * Ignore region specifiers - the kernel's ideas of
- * pseudo-phys memory layout has nothing to do with the
- * machine physical layout. We can't allocate highmem
- * because we can't return a pointer to it.
- */
- flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ void *ret;
- /* Convert the size to actually allocated. */
+ /* Align the allocation to the Xen page size */
size = 1UL << (order + XEN_PAGE_SHIFT);
- /* On ARM this function returns an ioremap'ped virtual address for
- * which virt_to_phys doesn't return the corresponding physical
- * address. In fact on ARM virt_to_phys only works for kernel direct
- * mapped RAM memory. Also see comment below.
- */
- ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
-
+ ret = (void *)__get_free_pages(flags, get_order(size));
if (!ret)
return ret;
-
- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
-
- /* At this point dma_handle is the dma address, next we are
- * going to set it to the machine address.
- * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
- * to *dma_handle. */
- phys = dma_to_phys(hwdev, *dma_handle);
- dev_addr = xen_phys_to_dma(hwdev, phys);
- if (((dev_addr + size - 1 <= dma_mask)) &&
- !range_straddles_page_boundary(phys, size))
- *dma_handle = dev_addr;
- else {
- if (xen_create_contiguous_region(phys, order,
- fls64(dma_mask), dma_handle) != 0) {
- xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
- return NULL;
- }
- *dma_handle = phys_to_dma(hwdev, *dma_handle);
+ phys = virt_to_phys(ret);
+
+ *dma_handle = xen_phys_to_dma(dev, phys);
+ if (*dma_handle + size - 1 > dma_mask ||
+ range_straddles_page_boundary(phys, size)) {
+ if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
+ dma_handle) != 0)
+ goto out_free_pages;
SetPageXenRemapped(virt_to_page(ret));
}
+
memset(ret, 0, size);
return ret;
+
+out_free_pages:
+ free_pages((unsigned long)ret, get_order(size));
+ return NULL;
}
static void
-xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr, unsigned long attrs)
+xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
+ phys_addr_t phys = virt_to_phys(vaddr);
int order = get_order(size);
- phys_addr_t phys;
- u64 dma_mask = DMA_BIT_MASK(32);
- struct page *page;
-
- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
-
- /* do not use virt_to_phys because on ARM it doesn't return you the
- * physical address */
- phys = xen_dma_to_phys(hwdev, dev_addr);
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (is_vmalloc_addr(vaddr))
- page = vmalloc_to_page(vaddr);
- else
- page = virt_to_page(vaddr);
+ if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
+ WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+ return;
- if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
- range_straddles_page_boundary(phys, size)) &&
- TestClearPageXenRemapped(page))
+ if (TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order);
-
- xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
- attrs);
+ free_pages((unsigned long)vaddr, get_order(size));
}
+#endif /* CONFIG_X86 */
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
@@ -378,7 +214,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Oh well, have to allocate and map a bounce buffer.
*/
- trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+ trace_swiotlb_bounced(dev, dev_addr, size);
map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
@@ -549,8 +385,13 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
+#ifdef CONFIG_X86
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
+#else
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
+#endif
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
.sync_single_for_device = xen_swiotlb_sync_single_for_device,
.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
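None of the alloc/free rework above is visible to drivers, which keep
allocating through the generic DMA API. A hypothetical consumer, with
demo_probe() made up purely for illustration:

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	/* Hypothetical probe routine, for illustration only. */
	static int demo_probe(struct device *dev)
	{
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* On Xen/x86 this now lands in xen_swiotlb_alloc_coherent();
		 * on Xen/ARM it goes straight to dma_direct_alloc(). */
		dma_free_coherent(dev, SZ_4K, buf, dma);
		return 0;
	}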