path: root/drivers
author	Robin Murphy <robin.murphy@arm.com>	2015-07-16 19:40:12 +0100
committer	David Woodhouse <David.Woodhouse@intel.com>	2015-07-28 15:47:56 +0100
commit	8f6429c7cb59f28433253575cc8e3262eed63592 (patch)
tree	b0ba92c894735701a01d289f92c42c221c6d3cf1 /drivers
parent	52721d9d3334c1cb1f76219a161084094ec634dc (diff)
iommu/iova: Avoid over-allocating when size-aligned
Currently, allocating a size-aligned IOVA region quietly adjusts the actual allocation size in the process, returning a rounded-up power-of-two-sized allocation. This results in mismatched behaviour in the IOMMU driver if the original size was not a power of two, where the original size is mapped, but the rounded-up IOVA size is unmapped.

Whilst some IOMMUs will happily unmap already-unmapped pages, others consider this an error, so fix it by computing the necessary alignment padding without altering the actual allocation size. Also clean up by making pad_size unsigned, since its callers always pass unsigned values and negative padding makes little sense here anyway.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
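As a quick illustration of the patched computation, here is a minimal userspace sketch (a hedged example, not kernel code: pow2_roundup() is a stand-in for the kernel's __roundup_pow_of_two(), and the size/limit_pfn values are made up):

	#include <stdio.h>

	/* Stand-in for the kernel's __roundup_pow_of_two() */
	static unsigned int pow2_roundup(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	/* Same arithmetic as the patched iova_get_pad_size(): padding
	 * that makes (limit_pfn + 1 - size - pad) aligned on the
	 * power-of-two order of size, without rounding size itself up */
	static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
	{
		return (limit_pfn + 1 - size) & (pow2_roundup(size) - 1);
	}

	int main(void)
	{
		unsigned int size = 5, limit_pfn = 1023;	/* example values */
		unsigned int pad = pad_size(size, limit_pfn);
		unsigned int start = limit_pfn + 1 - size - pad;

		/* prints pad=3 start=1016: 1016 is 8-aligned, yet only
		 * 5 pfns are reserved, so map and unmap sizes agree */
		printf("pad=%u start=%u\n", pad, start);
		return 0;
	}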
Diffstat (limited to 'drivers')
 drivers/iommu/intel-iommu.c |  2 ++
 drivers/iommu/iova.c        | 23 ++++++-----------------
 2 files changed, 8 insertions(+), 17 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..92101597cede 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3233,6 +3233,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	/* Restrict dma_mask to the width that the iommu can handle */
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+	/* Ensure we reserve the whole size-aligned region */
+	nrpages = __roundup_pow_of_two(nrpages);
 
 	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d923f3e1..29f2efcf668e 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -120,19 +120,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	}
 }
 
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required, to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-	unsigned int pad_size = 0;
-	unsigned int order = ilog2(size);
-
-	if (order)
-		pad_size = (limit_pfn + 1) % (1 << order);
-
-	return pad_size;
+	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }
 
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -265,12 +260,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;
 
-	/* If size aligned is set then round the size to
-	 * to next power of two.
-	 */
-	if (size_aligned)
-		size = __roundup_pow_of_two(size);
-
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
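To see why the intel-iommu.c hunk is needed: alloc_iova() previously rounded the size up internally, so the Intel driver always got the whole power-of-two region reserved; with that removed, the driver preserves its old behaviour by rounding nrpages up itself before calling in. A hedged side-by-side of the old and new arithmetic (standalone C with made-up values; pow2_roundup() again stands in for __roundup_pow_of_two()):

	#include <stdio.h>

	/* Stand-in for the kernel's __roundup_pow_of_two() */
	static unsigned int pow2_roundup(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned int size = 5, limit_pfn = 1023;	/* example values */
		unsigned int align = pow2_roundup(size);	/* 8 */

		/* Old path: size itself was rounded up before allocating */
		unsigned int old_size = align;			/* 8 pfns reserved */
		unsigned int old_pad = (limit_pfn + 1) % align;	/* 0 */

		/* New path: size is untouched; alignment becomes padding */
		unsigned int new_pad = (limit_pfn + 1 - size) & (align - 1);	/* 3 */

		/* Both start at pfn 1016, but the old path reserved 8 pfns
		 * for a 5-pfn mapping, so the driver later tried to unmap
		 * 3 pfns that were never mapped */
		printf("old: start=%u reserved=%u\n",
		       limit_pfn + 1 - old_size - old_pad, old_size);
		printf("new: start=%u reserved=%u\n",
		       limit_pfn + 1 - size - new_pad, size);
		return 0;
	}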