From 6243f572a18db99607f29517b2d6b4209356b9fa Mon Sep 17 00:00:00 2001
From: Chen Jun
Date: Fri, 23 Oct 2020 06:48:27 +0000
Subject: iommu: Modify the description of iommu_sva_unbind_device

iommu_sva_unbind_device has no return value.

Remove the description of the return value of the function.

Signed-off-by: Chen Jun
Link: https://lore.kernel.org/r/20201023064827.74794-1-chenjun102@huawei.com
Signed-off-by: Will Deacon
---
 drivers/iommu/iommu.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b53446bb8c6b..88b0c9192d8c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2995,8 +2995,6 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
  * Put reference to a bond between device and address space. The device should
  * not be issuing any more transaction for this PASID. All outstanding page
  * requests for this PASID must have been flushed to the IOMMU.
- *
- * Returns 0 on success, or an error value
  */
 void iommu_sva_unbind_device(struct iommu_sva *handle)
 {
--
cgit v1.2.3


From 058236eef606ea53ea7317afc20e9469cf3c3b91 Mon Sep 17 00:00:00 2001
From: Yang Yingliang
Date: Thu, 26 Nov 2020 21:38:25 +0800
Subject: iommu: return error code when it can't get group

Although iommu_group_get() in iommu_probe_device() will always succeed
thanks to __iommu_probe_device() creating the group if it's not present,
it's still worth initialising 'ret' to -ENODEV in case this path is
reachable in the future.

For now, this patch results in no functional change.

Reported-by: Hulk Robot
Signed-off-by: Yang Yingliang
Link: https://lore.kernel.org/r/20201126133825.3643852-1-yangyingliang@huawei.com
Signed-off-by: Will Deacon
---
 drivers/iommu/iommu.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 88b0c9192d8c..dd617ed854a3 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -253,8 +253,10 @@ int iommu_probe_device(struct device *dev)
 		goto err_out;
 
 	group = iommu_group_get(dev);
-	if (!group)
+	if (!group) {
+		ret = -ENODEV;
 		goto err_release;
+	}
 
 	/*
 	 * Try to allocate a default domain - needs support from the
--
cgit v1.2.3


From 093b32a849b336b5b48bdde1041fc06f91ae475c Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Mon, 7 Dec 2020 17:35:53 +0800
Subject: iommu: Improve the performance for direct_mapping

Currently, direct_mapping always uses the smallest pgsize (normally
SZ_4K) for every mapping. This is unnecessary: we can gather the size of
each contiguous unmapped run and call iommu_map once for the whole run,
letting iommu_map decide how best to map it with the just-right pgsize.

As the original comment says, we have to take care of overlaps with
existing mappings, otherwise iommu_map may return -EEXIST. In the
overlap case, we first map the gathered region that precedes the
overlap, then carry on with the remaining part.

Every iommu device calls direct_mapping when its iommu initialises, so
this patch improves the boot/initialisation time, especially when the
regions only need level-1 mappings.
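
For illustration, a minimal userspace model of the gathering loop looks
like this (mock_iova_to_phys() and mock_map() are made-up stand-ins for
iommu_iova_to_phys() and iommu_map(); this sketch is not part of the
patch):

	#include <stdio.h>

	#define PG_SIZE 0x1000UL

	/* Stand-in for iommu_iova_to_phys(): pretend only the third
	 * page of the region is already mapped. */
	static unsigned long mock_iova_to_phys(unsigned long addr)
	{
		return addr == 2 * PG_SIZE ? addr : 0;
	}

	/* Stand-in for iommu_map(): just report the gathered range. */
	static void mock_map(unsigned long iova, unsigned long size)
	{
		printf("map [%#lx, %#lx)\n", iova, iova + size);
	}

	int main(void)
	{
		unsigned long start = 0, end = 8 * PG_SIZE; /* end is exclusive */
		unsigned long addr, map_size = 0;

		/* Walk one step past 'end' so a trailing run is flushed too. */
		for (addr = start; addr <= end; addr += PG_SIZE) {
			if (addr != end && !mock_iova_to_phys(addr)) {
				map_size += PG_SIZE;	/* extend the pending run */
				continue;
			}
			/* Hit an already-mapped page or the end: flush the run. */
			if (map_size) {
				mock_map(addr - map_size, map_size);
				map_size = 0;
			}
		}
		return 0;	/* prints: map [0, 0x2000) and map [0x3000, 0x8000) */
	}

Both runs end up as single map calls, so the driver is free to use a
larger pgsize whenever the size and alignment of the run allow it.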
Signed-off-by: Anan Sun
Signed-off-by: Yong Wu
Link: https://lore.kernel.org/r/20201207093553.8635-1-yong.wu@mediatek.com
Signed-off-by: Will Deacon
---
 drivers/iommu/iommu.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dd617ed854a3..ae70a313c090 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -739,6 +739,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 	/* We need to consider overlapping regions for different devices */
 	list_for_each_entry(entry, &mappings, list) {
 		dma_addr_t start, end, addr;
+		size_t map_size = 0;
 
 		if (domain->ops->apply_resv_region)
 			domain->ops->apply_resv_region(dev, domain, entry);
@@ -750,16 +751,27 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
 			continue;
 
-		for (addr = start; addr < end; addr += pg_size) {
+		for (addr = start; addr <= end; addr += pg_size) {
 			phys_addr_t phys_addr;
 
+			if (addr == end)
+				goto map_end;
+
 			phys_addr = iommu_iova_to_phys(domain, addr);
-			if (phys_addr)
+			if (!phys_addr) {
+				map_size += pg_size;
 				continue;
+			}
 
-			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
-			if (ret)
-				goto out;
+map_end:
+			if (map_size) {
+				ret = iommu_map(domain, addr - map_size,
+						addr - map_size, map_size,
+						entry->prot);
+				if (ret)
+					goto out;
+				map_size = 0;
+			}
 		}
 	}
 
--
cgit v1.2.3


From f12e0d22903e8fb653168efa67ae3308712ea97e Mon Sep 17 00:00:00 2001
From: Keqian Zhu
Date: Mon, 7 Dec 2020 19:57:58 +0800
Subject: iommu: Defer the early return in arm_(v7s/lpae)_map

Although handling a mapping request with no permissions is a trivial
no-op, defer the early return until after the size/range checks so that
we are consistent with other mapping requests.
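
The resulting check order can be summarised with a small standalone
sketch (the limits and permission bits below are placeholder values,
not the real io-pgtable configuration):

	#include <errno.h>
	#include <stdio.h>

	#define IAS_LIMIT      (1UL << 30)	/* placeholder input address space */
	#define OAS_LIMIT      (1UL << 30)	/* placeholder output address space */
	#define PROT_READ_BIT  (1 << 0)		/* placeholder for IOMMU_READ */
	#define PROT_WRITE_BIT (1 << 1)		/* placeholder for IOMMU_WRITE */

	static int map_sketch(unsigned long iova, unsigned long paddr, int prot)
	{
		/* Range checks run first, so a malformed request always fails... */
		if (iova >= IAS_LIMIT || paddr >= OAS_LIMIT)
			return -ERANGE;

		/* ...and only a valid no-access request is treated as a no-op. */
		if (!(prot & (PROT_READ_BIT | PROT_WRITE_BIT)))
			return 0;

		return 0;	/* the real mapping work would happen here */
	}

	int main(void)
	{
		/* Out of range and no permissions: -ERANGE now, 0 before. */
		printf("%d\n", map_sketch(1UL << 31, 0, 0));
		return 0;
	}

With the old order, the same out-of-range request would have returned 0
silently because the no-access check ran before any validation.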
Signed-off-by: Keqian Zhu
Link: https://lore.kernel.org/r/20201207115758.9400-1-zhukeqian1@huawei.com
Signed-off-by: Will Deacon
---
 drivers/iommu/io-pgtable-arm-v7s.c | 8 ++++----
 drivers/iommu/io-pgtable-arm.c     | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index a688f22cbe3b..359b96b0fa3e 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -522,14 +522,14 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
 	struct io_pgtable *iop = &data->iop;
 	int ret;
 
-	/* If no access, then nothing to do */
-	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
-		return 0;
-
 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
 		    paddr >= (1ULL << data->iop.cfg.oas)))
 		return -ERANGE;
 
+	/* If no access, then nothing to do */
+	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+		return 0;
+
 	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
 	/*
 	 * Synchronise all PTE updates for the new mapping before there's
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a7a9bc08dcd1..8ade72adab31 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -444,10 +444,6 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 	arm_lpae_iopte prot;
 	long iaext = (s64)iova >> cfg->ias;
 
-	/* If no access, then nothing to do */
-	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
-		return 0;
-
 	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
 		return -EINVAL;
 
@@ -456,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(iaext || paddr >> cfg->oas))
 		return -ERANGE;
 
+	/* If no access, then nothing to do */
+	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+		return 0;
+
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
 	/*
--
cgit v1.2.3


From f37eb48466d2ef4de33207f7389716d1734d9710 Mon Sep 17 00:00:00 2001
From: Kunkun Jiang
Date: Mon, 7 Dec 2020 20:01:50 +0800
Subject: iommu/io-pgtable-arm: Remove unused 'level' parameter from iopte_type() macro

The 'level' parameter to the iopte_type() macro is unused, so remove it.
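
The extracted type field depends only on the PTE bits, never on the walk
level, as the following standalone sketch illustrates (the shift, mask
and type values are assumed example values, not quoted from
io-pgtable-arm.c):

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed example values; the real definitions live in
	 * io-pgtable-arm.c as ARM_LPAE_PTE_TYPE_*. */
	#define PTE_TYPE_SHIFT 0
	#define PTE_TYPE_MASK  0x3ULL
	#define PTE_TYPE_TABLE 3ULL

	/* Single-parameter form: no level argument is needed to pull
	 * the type bits out of the PTE. */
	#define iopte_type(pte) (((pte) >> PTE_TYPE_SHIFT) & PTE_TYPE_MASK)

	int main(void)
	{
		uint64_t pte = 0xdead0000ULL | PTE_TYPE_TABLE;

		printf("is table: %d\n", iopte_type(pte) == PTE_TYPE_TABLE);
		return 0;
	}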
Signed-off-by: Kunkun Jiang
Link: https://lore.kernel.org/r/20201207120150.1891-1-jiangkunkun@huawei.com
Signed-off-by: Will Deacon
---
 drivers/iommu/io-pgtable-arm.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 8ade72adab31..f0d1d4c08ca0 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -130,7 +130,7 @@
 
 /* IOPTE accessors */
 #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
-#define iopte_type(pte,l)					\
+#define iopte_type(pte)					\
 	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
 
 #define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
@@ -151,9 +151,9 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
 			      enum io_pgtable_fmt fmt)
 {
 	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
-		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;
+		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
 
-	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
+	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
 }
 
 static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
@@ -280,7 +280,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 		/* We require an unmap first */
 		WARN_ON(!selftest_running);
 		return -EEXIST;
-	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+	} else if (iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE) {
 		/*
 		 * We need to unmap and free the old table before
 		 * overwriting it with a block entry.
@@ -548,7 +548,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 	 * block, but anything else is invalid. We can't misinterpret
 	 * a page entry here since we're never at the last level.
 	 */
-	if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
+	if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
 		return 0;
 
 	tablep = iopte_deref(pte, data);
--
cgit v1.2.3