author     Ohad Ben-Cohen <ohad@wizery.com>        2011-11-10 11:32:25 +0200
committer  Joerg Roedel <joerg.roedel@amd.com>     2011-11-10 11:40:37 +0100
commit     5009065d38c95455bd2d27c2838313e3dd0c5bc7 (patch)
tree       a957af07e95166bcc014b3f333234fe74cef790e /drivers/iommu
parent     1ea6b8f48918282bdca0b32a34095504ee65bab5 (diff)
iommu/core: stop converting bytes to page order back and forth
Express sizes in bytes rather than in page order, to eliminate the
size->order->size conversions we have whenever the IOMMU API is calling
the low level drivers' map/unmap methods.

Adopt all existing drivers.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Cc: David Brown <davidb@codeaurora.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <Joerg.Roedel@amd.com>
Cc: Stepan Moskovchenko <stepanm@codeaurora.org>
Cc: KyongHo Cho <pullip.cho@samsung.com>
Cc: Hiroshi DOYU <hdoyu@nvidia.com>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
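To illustrate the round trip this patch removes, here is a minimal standalone C sketch (hypothetical old_driver_map/new_driver_map callbacks, not kernel code): before the change the core converted a byte size into a page order for the driver callback, and each driver converted it straight back into bytes.

#include <stddef.h>

/* Old-style callback: the core passes the size as a page order. */
static int old_driver_map(unsigned long iova, unsigned long paddr,
			  int gfp_order, int prot)
{
	/* The driver immediately converts the order back to bytes. */
	size_t size = 0x1000UL << gfp_order;

	(void)iova; (void)paddr; (void)prot;
	/* ... program 'size' bytes of page table entries here ... */
	return size ? 0 : -1;
}

/* New-style callback: the size already arrives in bytes. */
static int new_driver_map(unsigned long iova, unsigned long paddr,
			  size_t size, int prot)
{
	(void)iova; (void)paddr; (void)prot;
	/* ... program 'size' bytes of page table entries here ... */
	return size ? 0 : -1;
}

int main(void)
{
	/* Map 4 KiB with each interface: order 0 vs. 0x1000 bytes. */
	old_driver_map(0x10000, 0x20000, 0, 0);
	new_driver_map(0x10000, 0x20000, 0x1000, 0);
	return 0;
}

With sizes expressed in bytes end to end, the drivers' unmap callbacks can also return the number of unmapped bytes directly instead of a page order, as the hunks below show.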
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c    13
-rw-r--r--  drivers/iommu/intel-iommu.c  11
-rw-r--r--  drivers/iommu/iommu.c         8
-rw-r--r--  drivers/iommu/msm_iommu.c    19
-rw-r--r--  drivers/iommu/omap-iommu.c   14
5 files changed, 26 insertions, 39 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4ee277a8521a..a3b7072e86e2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2702,9 +2702,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot)
{
- unsigned long page_size = 0x1000UL << gfp_order;
struct protection_domain *domain = dom->priv;
int prot = 0;
int ret;
@@ -2721,13 +2720,11 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
return ret;
}
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ size_t page_size)
{
struct protection_domain *domain = dom->priv;
- unsigned long page_size, unmap_size;
-
- page_size = 0x1000UL << gfp_order;
+ size_t unmap_size;
mutex_lock(&domain->api_lock);
unmap_size = iommu_unmap_page(domain, iova, page_size);
@@ -2735,7 +2732,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
domain_flush_tlb_pde(domain);
- return get_order(unmap_size);
+ return unmap_size;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c46..2a165010a1c1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3979,12 +3979,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- int gfp_order, int iommu_prot)
+ size_t size, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
int prot = 0;
- size_t size;
int ret;
if (iommu_prot & IOMMU_READ)
@@ -3994,7 +3993,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- size = PAGE_SIZE << gfp_order;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
u64 end;
@@ -4017,11 +4015,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
return ret;
}
-static int intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- size_t size = PAGE_SIZE << gfp_order;
int order;
order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4027,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return order;
+ return PAGE_SIZE << order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df553..7a2953d8f12e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -168,13 +168,13 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
BUG_ON(!IS_ALIGNED(iova | paddr, size));
- return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+ return domain->ops->map(domain, iova, paddr, size, prot);
}
EXPORT_SYMBOL_GPL(iommu_map);
int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
{
- size_t size;
+ size_t size, unmapped;
if (unlikely(domain->ops->unmap == NULL))
return -ENODEV;
@@ -183,6 +183,8 @@ int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
BUG_ON(!IS_ALIGNED(iova, size));
- return domain->ops->unmap(domain, iova, gfp_order);
+ unmapped = domain->ops->unmap(domain, iova, size);
+
+ return get_order(unmapped);
}
EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 5865dd2e28f9..13718d958da8 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -352,7 +352,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t len, int prot)
{
struct msm_priv *priv;
unsigned long flags;
@@ -363,7 +363,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_pte;
unsigned long sl_offset;
unsigned int pgprot;
- size_t len = 0x1000UL << order;
int ret = 0, tex, sh;
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +462,8 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
- int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
{
struct msm_priv *priv;
unsigned long flags;
@@ -474,7 +473,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_table;
unsigned long *sl_pte;
unsigned long sl_offset;
- size_t len = 0x1000UL << order;
int i, ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +542,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
ret = __flush_iotlb(domain);
- /*
- * the IOMMU API requires us to return the order of the unmapped
- * page (on success).
- */
- if (!ret)
- ret = order;
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8f32b2bf7587..ad80b1d0d099 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1019,12 +1019,11 @@ static void iopte_cachep_ctor(void *iopte)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t bytes = PAGE_SIZE << order;
struct iotlb_entry e;
int omap_pgsz;
u32 ret, flags;
@@ -1049,19 +1048,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return ret;
}
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+ size_t size)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t unmap_size;
- dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
+ dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
- unmap_size = iopgtable_clear_entry(oiommu, da);
-
- return unmap_size ? get_order(unmap_size) : -EINVAL;
+ return iopgtable_clear_entry(oiommu, da);
}
static int