author     Jason Gunthorpe <jgg@nvidia.com>    2023-05-11 01:42:04 -0300
committer  Joerg Roedel <jroedel@suse.de>      2023-05-23 08:15:53 +0200
commit     d257344c661950986e6129407f7169f54e0bb4cf (patch)
tree       2add8b09ec079106511c09306c1eb612b2ce88d2 /drivers/iommu
parent     4c8ad9da05662141928fe4ed001d3775fd95221c (diff)
iommu: Replace __iommu_group_dma_first_attach() with set_domain
Reorganize the attach_deferred logic to set dev->iommu->attach_deferred
immediately during probe, and then have __iommu_device_set_domain() check
it and not attach the default_domain.

This is to prepare for removing the group->domain set from
iommu_group_alloc_default_domain() by calling __iommu_group_set_domain()
to set the group->domain.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Heiko Stuebner <heiko@sntech.de>
Tested-by: Niklas Schnelle <schnelle@linux.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/6-v5-1b99ae392328+44574-iommu_err_unwind_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
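For orientation, the resulting flow can be sketched roughly as follows. The
two sketch_* functions are illustrative stand-ins, not functions in the tree,
and they elide locking and error unwinding; the authoritative change is the
diff below.

/*
 * Illustrative sketch only -- simplified stand-ins for the probe and
 * set_domain paths touched by this patch; not the exact kernel code.
 */

/* Probe time: latch the driver's answer once, while ops is at hand. */
static void sketch_probe_latch_deferred(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
}

/* Set-domain time: consult the latched flag instead of re-querying ops. */
static int sketch_set_domain(struct iommu_group *group,
			     struct iommu_domain *new_domain,
			     struct device *dev)
{
	if (dev->iommu->attach_deferred) {
		/* The default_domain attach stays deferred... */
		if (new_domain == group->default_domain)
			return 0;
		/* ...but any explicit attach ends the deferral. */
		dev->iommu->attach_deferred = 0;
	}
	return __iommu_attach_device(new_domain, dev);
}

Latching the flag at probe time means __iommu_device_set_domain() no longer
has to call back into ops->is_attach_deferred() on every domain change.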
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/iommu.c | 32
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e0bfb114d08d..eaa63fe887f9 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -365,6 +365,8 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
 
 	dev->iommu->iommu_dev = iommu_dev;
 	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
+	if (ops->is_attach_deferred)
+		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
 
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group)) {
@@ -399,27 +401,14 @@ err_unlock:
 	return ret;
 }
 
-static bool iommu_is_attach_deferred(struct device *dev)
-{
-	const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-	if (ops->is_attach_deferred)
-		return ops->is_attach_deferred(dev);
-
-	return false;
-}
-
 static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
 {
 	struct iommu_domain *domain = data;
 
 	lockdep_assert_held(&dev->iommu_group->mutex);
 
-	if (iommu_is_attach_deferred(dev)) {
-		dev->iommu->attach_deferred = 1;
+	if (dev->iommu->attach_deferred)
 		return 0;
-	}
-
 	return __iommu_attach_device(domain, dev);
 }
 
@@ -1831,12 +1820,6 @@ static void probe_alloc_default_domain(const struct bus_type *bus,
 
 }
 
-static int __iommu_group_dma_first_attach(struct iommu_group *group)
-{
-	return __iommu_group_for_each_dev(group, group->default_domain,
-					  iommu_group_do_dma_first_attach);
-}
-
 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
 {
 	const struct iommu_ops *ops = dev_iommu_ops(dev);
@@ -1899,7 +1882,8 @@ int bus_iommu_probe(const struct bus_type *bus)
 
 		iommu_group_create_direct_mappings(group);
 
-		ret = __iommu_group_dma_first_attach(group);
+		group->domain = NULL;
+		ret = __iommu_group_set_domain(group, group->default_domain);
 
 		mutex_unlock(&group->mutex);
 
@@ -2200,6 +2184,12 @@ static int __iommu_device_set_domain(struct iommu_group *group,
 {
 	int ret;
 
+	if (dev->iommu->attach_deferred) {
+		if (new_domain == group->default_domain)
+			return 0;
+		dev->iommu->attach_deferred = 0;
+	}
+
 	ret = __iommu_attach_device(new_domain, dev);
 	if (ret) {
 		/*