author    Dmitry Osipenko <digetx@gmail.com>    2020-09-01 23:37:30 +0300
committer Joerg Roedel <jroedel@suse.de>        2020-09-04 14:27:18 +0200
commit    404d0b308e4f730b1c9b2f33e84a6f7069db94c5 (patch)
tree      7bd91b55021629866da36992951268ddd731ce62 /drivers/iommu/tegra-smmu.c
parent    1ea5440e36a792d01aae0b22159e0a13b33589fe (diff)
iommu/tegra-smmu: Add locking around mapping operations
The mapping operations of the Tegra SMMU driver are subject to a race condition because the SMMU Address Space isn't allocated and freed atomically, while it should be. This patch makes the mapping operations atomic; it fixes an accidentally released Host1x Address Space problem that happens when running multiple graphics tests in parallel on Tegra30, i.e. when multiple threads race with each other in the Host1x submission and completion code paths, performing IOVA mappings and unmappings in parallel.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Thierry Reding <treding@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Link: https://lore.kernel.org/r/20200901203730.27865-1-digetx@gmail.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
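For illustration only, below is a minimal user-space analogue of the locking pattern the patch introduces (the names addr_space, get_pde_page, NUM_PDE and the pthread-based locking are assumptions for this sketch, not the driver's code): a per-address-space lock guards the page-directory slots, the lock is dropped across a sleeping allocation when the GFP flags allow it, and the slot is re-checked after re-locking because a concurrent mapper may have populated it in the meantime.

    /* Hypothetical user-space sketch of the drop-lock/allocate/re-check pattern. */
    #include <pthread.h>
    #include <stdlib.h>

    #define NUM_PDE 1024
    #define PAGE_SZ 4096

    struct addr_space {
            pthread_mutex_t lock;   /* plays the role of as->lock */
            void *pts[NUM_PDE];     /* plays the role of as->pts[] */
    };

    /* Called with as->lock held; returns the page backing page-directory entry @pde. */
    static void *get_pde_page(struct addr_space *as, unsigned int pde, int may_sleep)
    {
            void *page = as->pts[pde];

            /* Fast path: another mapping already allocated this page table. */
            if (page)
                    return page;

            /*
             * Drop the lock across the potentially sleeping allocation so it
             * is not held while blocking; in the real driver this decision is
             * keyed off the absence of __GFP_ATOMIC in the GFP flags.
             */
            if (may_sleep)
                    pthread_mutex_unlock(&as->lock);

            page = calloc(1, PAGE_SZ);

            if (may_sleep)
                    pthread_mutex_lock(&as->lock);

            /*
             * A concurrent mapper may have won the race while the lock was
             * dropped; if so, use its page and free ours, and a local
             * allocation failure is not fatal.
             */
            if (as->pts[pde]) {
                    free(page);             /* free(NULL) is a no-op */
                    return as->pts[pde];
            }

            as->pts[pde] = page;            /* NULL here means allocation failed */
            return page;
    }

In the actual patch, tegra_smmu_map() and tegra_smmu_unmap() wrap the whole operation in spin_lock_irqsave()/spin_unlock_irqrestore() on as->lock, which is why as_get_pde_page() must temporarily release that lock for blocking allocations.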
Diffstat (limited to 'drivers/iommu/tegra-smmu.c')
-rw-r--r--   drivers/iommu/tegra-smmu.c | 95
1 file changed, 84 insertions(+), 11 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 2574e716086b..046add7acb61 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
@@ -50,6 +51,7 @@ struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
+ spinlock_t lock;
u32 *count;
struct page **pts;
struct page *pd;
@@ -309,6 +311,8 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
return NULL;
}
+ spin_lock_init(&as->lock);
+
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
@@ -570,19 +574,14 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap)
+ dma_addr_t *dmap, struct page *page)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
if (!as->pts[pde]) {
- struct page *page;
dma_addr_t dma;
- page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
- if (!page)
- return NULL;
-
dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
@@ -656,15 +655,61 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static struct page *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
+{
+ unsigned int pde = iova_pd_index(iova);
+ struct page *page = as->pts[pde];
+
+ /* at first check whether allocation needs to be done at all */
+ if (page)
+ return page;
+
+ /*
+ * In order to prevent exhaustion of the atomic memory pool, we
+ * allocate page in a sleeping context if GFP flags permit. Hence
+ * spinlock needs to be unlocked and re-locked after allocation.
+ */
+ if (!(gfp & __GFP_ATOMIC))
+ spin_unlock_irqrestore(&as->lock, *flags);
+
+ page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
+
+ if (!(gfp & __GFP_ATOMIC))
+ spin_lock_irqsave(&as->lock, *flags);
+
+ /*
+ * In a case of blocking allocation, a concurrent mapping may win
+ * the PDE allocation. In this case the allocated page isn't needed
+ * if allocation succeeded and the allocation failure isn't fatal.
+ */
+ if (as->pts[pde]) {
+ if (page)
+ __free_page(page);
+
+ page = as->pts[pde];
+ }
+
+ return page;
+}
+
+static int
+__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
+ unsigned long *flags)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
+ struct page *page;
u32 pte_attrs;
u32 *pte;
- pte = as_get_pte(as, iova, &pte_dma);
+ page = as_get_pde_page(as, iova, gfp, flags);
+ if (!page)
+ return -ENOMEM;
+
+ pte = as_get_pte(as, iova, &pte_dma, page);
if (!pte)
return -ENOMEM;
@@ -686,8 +731,9 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
return 0;
}
-static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t
+__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
@@ -703,6 +749,33 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return size;
}
+static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&as->lock, flags);
+ ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ return ret;
+}
+
+static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+
+ spin_lock_irqsave(&as->lock, flags);
+ size = __tegra_smmu_unmap(domain, iova, size, gather);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ return size;
+}
+
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{