author     Robin Murphy <robin.murphy@arm.com>    2015-01-12 17:51:15 +0000
committer  Joerg Roedel <jroedel@suse.de>         2015-01-19 14:55:22 +0100
commit     1b72250076dde4276acecf3a7da722b185703e78 (patch)
tree       86711d7e91dfb6ad914c23a18e8a12f5f30169ee /drivers
parent     85b4545629663486b7f71047ce3b54fa0ad3eb28 (diff)
iommu: Make IOVA domain low limit flexible
To share the IOVA allocator with other architectures, it needs to accommodate more general aperture restrictions; move the lower limit from a compile-time constant to a runtime domain property to allow IOVA domains with different requirements to co-exist.

Also reword the slightly unclear description of alloc_iova since we're touching it anyway.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
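For callers, the visible change is the extra start_pfn argument to init_iova_domain(). A minimal sketch of how a driver sets up a domain after this patch (the helper name example_iovad_init is invented for illustration; the macros mirror the intel-iommu.c hunks in the diff below):

	#include <linux/iova.h>
	#include <linux/dma-mapping.h>

	/* Mirrors the definitions touched in intel-iommu.c below. */
	#define IOVA_START_PFN	(1)
	#define IOVA_PFN(addr)	((addr) >> PAGE_SHIFT)
	#define DMA_32BIT_PFN	IOVA_PFN(DMA_BIT_MASK(32))

	/* Illustrative helper, not part of the patch. */
	static void example_iovad_init(struct iova_domain *iovad)
	{
		/*
		 * The lower limit is now a runtime property of the domain:
		 * allocations will never go below start_pfn (here page 1),
		 * while DMA_32BIT_PFN still bounds the 32-bit window.
		 */
		init_iova_domain(iovad, IOVA_START_PFN, DMA_32BIT_PFN);
	}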
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/iommu/intel-iommu.c   9
-rw-r--r--  drivers/iommu/iova.c         10
2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index e758d8ed8fb5..86f9e82b015b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -71,6 +71,9 @@
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
@@ -1632,7 +1635,7 @@ static int dmar_init_reserved_ranges(void)
struct iova *iova;
int i;
- init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
+ init_iova_domain(&reserved_iova_list, IOVA_START_PFN, DMA_32BIT_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -1690,7 +1693,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width, agaw;
unsigned long sagaw;
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
@@ -4313,7 +4316,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 520b8c8ae0c4..a3dbba8caa19 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -55,11 +55,13 @@ void free_iova_mem(struct iova *iova)
}
void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn,
+ unsigned long pfn_32bit)
{
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
+ iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
}
@@ -162,7 +164,7 @@ move_left:
if (!curr) {
if (size_aligned)
pad_size = iova_get_pad_size(size, limit_pfn);
- if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+ if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return -ENOMEM;
}
@@ -237,8 +239,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
* @size: - size of page frames to allocate
* @limit_pfn: - max limit address
* @size_aligned: - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
* flag is set then the allocated address iova->pfn_lo will be naturally
* aligned on roundup_power_of_two(size).
*/