Diffstat (limited to 'drivers/iommu/arm-smmu.c'):

 drivers/iommu/arm-smmu.c | 209 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 174 insertions(+), 35 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 5a28ae892504..af18a7e7f917 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -41,13 +41,15 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -101,6 +103,10 @@
#define MSI_IOVA_LENGTH 0x100000
static int force_stage;
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param() here.
+ */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
@@ -119,6 +125,7 @@ enum arm_smmu_implementation {
GENERIC_SMMU,
ARM_MMU500,
CAVIUM_SMMUV2,
+ QCOM_SMMUV2,
};
struct arm_smmu_s2cr {
@@ -206,6 +213,8 @@ struct arm_smmu_device {
u32 num_global_irqs;
u32 num_context_irqs;
unsigned int *irqs;
+ struct clk_bulk_data *clks;
+ int num_clks;
u32 cavium_id_base; /* Specific to Cavium */
@@ -267,6 +276,20 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
+static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
+{
+ if (pm_runtime_enabled(smmu->dev))
+ return pm_runtime_get_sync(smmu->dev);
+
+ return 0;
+}
+
+static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
+{
+ if (pm_runtime_enabled(smmu->dev))
+ pm_runtime_put(smmu->dev);
+}
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
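These two helpers define the guard pattern the rest of the patch wraps around register accesses: when runtime PM is not enabled (no power domain; see the probe hunk further down), both collapse to no-ops, so the fast path only pays for the pm_runtime_enabled() check. A minimal caller sketch, with example_poke_smmu() being a hypothetical illustration rather than code from this patch:

static int example_poke_smmu(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_rpm_get(smmu);	/* powers up clocks/domain if needed */
	if (ret < 0)
		return ret;		/* device could not be resumed */

	/* ... SMMU register accesses are safe within this window ... */

	arm_smmu_rpm_put(smmu);		/* balanced put; device may suspend */
	return 0;
}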
@@ -926,11 +949,15 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- int irq;
+ int ret, irq;
if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
return;
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return;
+
/*
* Disable the context bank and free the page tables before freeing
* it.
@@ -945,6 +972,8 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+
+ arm_smmu_rpm_put(smmu);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
@@ -1103,7 +1132,7 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
static int arm_smmu_master_alloc_smes(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_smr *smrs = smmu->smrs;
@@ -1206,7 +1235,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1226,10 +1255,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENODEV;
smmu = fwspec_smmu(fwspec);
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return ret;
+
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
if (ret < 0)
- return ret;
+ goto rpm_put;
/*
* Sanity check the domain. We don't support domains across
@@ -1239,49 +1273,74 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_err(dev,
"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto rpm_put;
}
/* Looks ok, so add the device to the domain */
- return arm_smmu_domain_add_master(smmu_domain, fwspec);
+ ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+
+rpm_put:
+ arm_smmu_rpm_put(smmu);
+ return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+ int ret;
if (!ops)
return -ENODEV;
- return ops->map(ops, iova, paddr, size, prot);
+ arm_smmu_rpm_get(smmu);
+ ret = ops->map(ops, iova, paddr, size, prot);
+ arm_smmu_rpm_put(smmu);
+
+ return ret;
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+ size_t ret;
if (!ops)
return 0;
- return ops->unmap(ops, iova, size);
+ arm_smmu_rpm_get(smmu);
+ ret = ops->unmap(ops, iova, size);
+ arm_smmu_rpm_put(smmu);
+
+ return ret;
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->tlb_ops)
+ if (smmu_domain->tlb_ops) {
+ arm_smmu_rpm_get(smmu);
smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+ arm_smmu_rpm_put(smmu);
+ }
}
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->tlb_ops)
+ if (smmu_domain->tlb_ops) {
+ arm_smmu_rpm_get(smmu);
smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+ arm_smmu_rpm_put(smmu);
+ }
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -1296,6 +1355,11 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
u32 tmp;
u64 phys;
unsigned long va, flags;
+ int ret;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return 0;
cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -1324,6 +1388,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
return 0;
}
+ arm_smmu_rpm_put(smmu);
+
return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
@@ -1380,7 +1446,7 @@ static int arm_smmu_add_device(struct device *dev)
{
struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
int i, ret;
if (using_legacy_binding) {
@@ -1391,7 +1457,7 @@ static int arm_smmu_add_device(struct device *dev)
* will allocate/initialise a new one. Thus we need to update fwspec for
* later use.
*/
- fwspec = dev->iommu_fwspec;
+ fwspec = dev_iommu_fwspec_get(dev);
if (ret)
goto out_free;
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
@@ -1428,12 +1494,21 @@ static int arm_smmu_add_device(struct device *dev)
while (i--)
cfg->smendx[i] = INVALID_SMENDX;
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ goto out_cfg_free;
+
ret = arm_smmu_master_alloc_smes(dev);
+ arm_smmu_rpm_put(smmu);
+
if (ret)
goto out_cfg_free;
iommu_device_link(&smmu->iommu, dev);
+ device_link_add(dev, smmu->dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
return 0;
out_cfg_free:
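The device_link_add() above makes the SMMU a runtime-PM supplier of each master device, so a master that runtime-resumes itself implicitly powers up the SMMU first; DL_FLAG_AUTOREMOVE_SUPPLIER should drop the link automatically when the SMMU driver goes away. A sketch of what this buys a master driver (hypothetical consumer code, not part of this patch):

static int example_master_start_dma(struct device *master)
{
	int ret;

	/* Resumes the supplier (the SMMU) before the consumer itself. */
	ret = pm_runtime_get_sync(master);
	if (ret < 0) {
		pm_runtime_put_noidle(master);
		return ret;
	}

	/* ... DMA through the SMMU is safe while the reference is held ... */

	pm_runtime_put(master);		/* SMMU may now runtime-suspend again */
	return 0;
}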
@@ -1445,10 +1520,10 @@ out_free:
static void arm_smmu_remove_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master_cfg *cfg;
struct arm_smmu_device *smmu;
-
+ int ret;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
@@ -1456,8 +1531,15 @@ static void arm_smmu_remove_device(struct device *dev)
cfg = fwspec->iommu_priv;
smmu = cfg->smmu;
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return;
+
iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_master_free_smes(fwspec);
+
+ arm_smmu_rpm_put(smmu);
+
iommu_group_remove_device(dev);
kfree(fwspec->iommu_priv);
iommu_fwspec_free(dev);
@@ -1465,7 +1547,7 @@ static void arm_smmu_remove_device(struct device *dev)
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
struct iommu_group *group = NULL;
int i, idx;
@@ -1947,13 +2029,14 @@ struct arm_smmu_match_data {
};
#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
-static struct arm_smmu_match_data name = { .version = ver, .model = imp }
+static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
@@ -1962,9 +2045,9 @@ static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
+ { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
{ },
};
-MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
@@ -2150,6 +2233,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->irqs[i] = irq;
}
+ err = devm_clk_bulk_get_all(dev, &smmu->clks);
+ if (err < 0) {
+ dev_err(dev, "failed to get clocks %d\n", err);
+ return err;
+ }
+ smmu->num_clks = err;
+
+ err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
+ if (err)
+ return err;
+
err = arm_smmu_device_cfg_probe(smmu);
if (err)
return err;
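devm_clk_bulk_get_all() returns the number of clocks found (possibly zero) or a negative errno, which is why its result doubles as smmu->num_clks. The bulk clock calls introduced across this patch pair up as sketched below; the wrapper function is a hypothetical illustration of the lifecycle, not driver code:

#include <linux/clk.h>

static int example_clk_lifecycle(struct device *dev)
{
	struct clk_bulk_data *clks;
	int num, ret;

	num = devm_clk_bulk_get_all(dev, &clks);	/* clock count or -errno */
	if (num < 0)
		return num;

	ret = clk_bulk_prepare_enable(num, clks);	/* probe: prepare + enable all */
	if (ret)
		return ret;

	clk_bulk_disable(num, clks);			/* runtime suspend: drop enable only */
	ret = clk_bulk_enable(num, clks);		/* runtime resume: re-enable */
	if (ret)
		return ret;

	clk_bulk_disable_unprepare(num, clks);		/* final teardown */
	return 0;
}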
@@ -2200,6 +2294,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
arm_smmu_test_smr_masks(smmu);
/*
+ * We want to avoid touching dev->power.lock in fastpaths unless
+ * it's really going to do something useful - pm_runtime_enabled()
+ * can serve as an ideal proxy for that decision. So, conditionally
+ * enable pm_runtime.
+ */
+ if (dev->pm_domain) {
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ /*
* For ACPI and generic DT bindings, an SMMU will be probed before
* any device which might need it, so we want the bus ops in place
* ready to handle default domain setup as soon as any SMMU exists.
@@ -2224,48 +2329,82 @@ static int arm_smmu_legacy_bus_init(void)
}
device_initcall_sync(arm_smmu_legacy_bus_init);
-static int arm_smmu_device_remove(struct platform_device *pdev)
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
if (!smmu)
- return -ENODEV;
+ return;
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_err(&pdev->dev, "removing device with active domains!\n");
+ arm_smmu_rpm_get(smmu);
/* Turn the thing off */
writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
- return 0;
+ arm_smmu_rpm_put(smmu);
+
+ if (pm_runtime_enabled(smmu->dev))
+ pm_runtime_force_suspend(smmu->dev);
+ else
+ clk_bulk_disable(smmu->num_clks, smmu->clks);
+
+ clk_bulk_unprepare(smmu->num_clks, smmu->clks);
}
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
- arm_smmu_device_remove(pdev);
+ struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
+ if (ret)
+ return ret;
+
+ arm_smmu_device_reset(smmu);
+
+ return 0;
}
-static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
+static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
- arm_smmu_device_reset(smmu);
+ clk_bulk_disable(smmu->num_clks, smmu->clks);
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(arm_smmu_pm_ops, NULL, arm_smmu_pm_resume);
+static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return arm_smmu_runtime_resume(dev);
+}
+
+static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return arm_smmu_runtime_suspend(dev);
+}
+
+static const struct dev_pm_ops arm_smmu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
+ SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
+ arm_smmu_runtime_resume, NULL)
+};
static struct platform_driver arm_smmu_driver = {
.driver = {
- .name = "arm-smmu",
- .of_match_table = of_match_ptr(arm_smmu_of_match),
- .pm = &arm_smmu_pm_ops,
+ .name = "arm-smmu",
+ .of_match_table = of_match_ptr(arm_smmu_of_match),
+ .pm = &arm_smmu_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = arm_smmu_device_probe,
- .remove = arm_smmu_device_remove,
.shutdown = arm_smmu_device_shutdown,
};
-module_platform_driver(arm_smmu_driver);
-
-MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
-MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(arm_smmu_driver);
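With the module machinery gone there is deliberately no .remove callback: .shutdown alone quiesces the hardware, and suppress_bind_attrs prevents userspace from unbinding a driver that can no longer clean up after itself. For reference, and assuming the standard definition in <linux/platform_device.h>, the final line expands to roughly:

/* Approximate expansion of builtin_platform_driver(arm_smmu_driver). */
static int __init arm_smmu_driver_init(void)
{
	return platform_driver_register(&arm_smmu_driver);
}
device_initcall(arm_smmu_driver_init);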