From 0ce4a85f4f015d13a6a6b4954f4e8e63cd83f7b1 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Mon, 26 Aug 2019 16:50:56 +0800
Subject: Revert "iommu/vt-d: Avoid duplicated pci dma alias consideration"

This reverts commit 557529494d79f3f1fadd486dd18d2de0b19be4da.

Commit 557529494d79f ("iommu/vt-d: Avoid duplicated pci dma alias
consideration") aimed to address a NULL pointer dereference issue that
happened when a Thunderbolt device driver returned unexpectedly.
Unfortunately, this change breaks a previous PCI quirk added by commit
cc346a4714a59 ("PCI: Add function 1 DMA alias quirk for Marvell
devices"); as a result, devices like the Marvell 88SE9128 SATA
controller don't work anymore.

We will continue to try to find the real culprit mentioned in
557529494d79f, but for now we should revert it to fix the current
breakage.

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204627
Cc: Stijn Tintel
Cc: Petr Vandrovec
Reported-by: Stijn Tintel
Reported-by: Petr Vandrovec
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 55 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 53 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 12d094d08c0a..c4e0e4a9ee9e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
@@ -2105,9 +2107,26 @@ out_unlock:
 	return ret;
 }

+struct domain_context_mapping_data {
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	struct pasid_table *table;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+				     u16 alias, void *opaque)
+{
+	struct domain_context_mapping_data *data = opaque;
+
+	return domain_context_mapping_one(data->domain, data->iommu,
+					  data->table, PCI_BUS_NUM(alias),
+					  alias & 0xff);
+}
+
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
+	struct domain_context_mapping_data data;
 	struct pasid_table *table;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
@@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 		return -ENODEV;

 	table = intel_pasid_get_table(dev);
-	return domain_context_mapping_one(domain, iommu, table, bus, devfn);
+
+	if (!dev_is_pci(dev))
+		return domain_context_mapping_one(domain, iommu, table,
+						  bus, devfn);
+
+	data.domain = domain;
+	data.iommu = iommu;
+	data.table = table;
+
+	return pci_for_each_dma_alias(to_pci_dev(dev),
+				      &domain_context_mapping_cb, &data);
 }

 static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -4759,6 +4788,28 @@ out_free_dmar:
 	return ret;
 }

+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	struct intel_iommu *iommu = opaque;
+
+	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+	return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices.  If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them will possibly leave
+ * the others unable to operate.
+ */
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+{
+	if (!iommu || !dev || !dev_is_pci(dev))
+		return;
+
+	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+}
+
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
 	struct dmar_domain *domain;
@@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 					PASID_RID2PASID);

 		iommu_disable_dev_iotlb(info);
-		domain_context_clear_one(iommu, info->bus, info->devfn);
+		domain_context_clear(iommu, info->dev);
 		intel_pasid_free_table(info->dev);
 	}

-- cgit v1.2.3
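A single PCI function can issue DMA tagged with more than one requester ID: the function 1 alias quirk makes the Marvell controller's function 0 DMA with function 1's RID, so the IOMMU needs a context entry for every alias, not just the device's own bus/devfn. The standalone C sketch below models the callback-per-alias walk this revert restores; the toy_pci_dev type and toy_for_each_dma_alias() helper are invented for illustration and are not the kernel's pci_for_each_dma_alias() API, though the control flow mirrors domain_context_mapping() above.

#include <stdint.h>
#include <stdio.h>

#define PCI_BUS_NUM(rid)	(((rid) >> 8) & 0xff)
#define PCI_DEVFN_OF(rid)	((rid) & 0xff)

struct toy_pci_dev {
	uint16_t rid;		/* requester ID: bus << 8 | devfn */
	uint16_t dma_alias;	/* extra alias RID (0 if none), e.g. from
				 * the Marvell function 1 quirk */
};

/* Visit the device's own RID plus every quirk alias, stopping early if
 * the callback fails -- the shape of pci_for_each_dma_alias(). */
static int toy_for_each_dma_alias(struct toy_pci_dev *pdev,
				  int (*fn)(struct toy_pci_dev *, uint16_t, void *),
				  void *opaque)
{
	int ret = fn(pdev, pdev->rid, opaque);

	if (ret)
		return ret;
	if (pdev->dma_alias)
		ret = fn(pdev, pdev->dma_alias, opaque);
	return ret;
}

/* Stand-in for domain_context_mapping_cb(): "program" one context
 * entry per alias. */
static int map_context_cb(struct toy_pci_dev *pdev, uint16_t alias, void *opaque)
{
	(void)pdev;
	(void)opaque;
	printf("map context entry for bus %02x devfn %02x\n",
	       PCI_BUS_NUM(alias), PCI_DEVFN_OF(alias));
	return 0;
}

int main(void)
{
	/* 88SE9128-style device: function 0 also DMAs as function 1,
	 * so both RIDs must get a context entry. */
	struct toy_pci_dev sata = { .rid = 0x0300, .dma_alias = 0x0301 };

	return toy_for_each_dma_alias(&sata, map_context_cb, NULL);
}

Mapping only the device's own RID, as the reverted commit did, leaves the alias RID without a context entry, which is exactly why the Marvell parts stopped working.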
From 8744daf4b0699b724ee0a56b313a6c0c4ea289e3 Mon Sep 17 00:00:00 2001
From: Jacob Pan
Date: Mon, 26 Aug 2019 08:53:29 -0700
Subject: iommu/vt-d: Remove global page flush support

Support for global pages was removed from VT-d spec 3.0. Since the
global pages (G) flag only affects first-level paging structures, and
DMA requests with PASID are only supported by VT-d spec 3.0 and
onward, we can safely remove global pages support.

For kernel shared virtual address IOTLB invalidation, PASID
granularity and page-selective-within-PASID granularity will be used;
there is no global granularity supported. Without this fix, IOTLB
invalidation will cause an invalid descriptor error in the queued
invalidation (QI) interface.

Fixes: 1c4f88b7f1f9 ("iommu/vt-d: Shared virtual address in scalable mode")
Reported-by: Sanjay K Kumar
Signed-off-by: Jacob Pan
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-svm.c   | 36 +++++++++++++++---------------------
 include/linux/intel-iommu.h |  3 ---
 2 files changed, 15 insertions(+), 24 deletions(-)

diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 780de0caafe8..9b159132405d 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 }

 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-				unsigned long address, unsigned long pages, int ih, int gl)
+				unsigned long address, unsigned long pages, int ih)
 {
 	struct qi_desc desc;

-	if (pages == -1) {
-		/* For global kernel pages we have to flush them in *all* PASIDs
-		 * because that's the only option the hardware gives us. Despite
-		 * the fact that they are actually only accessible through one. */
-		if (gl)
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
-					QI_EIOTLB_TYPE;
-		else
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-					QI_EIOTLB_TYPE;
+	/*
+	 * Do PASID granular IOTLB invalidation if page selective capability is
+	 * not available.
+	 */
+	if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+			QI_EIOTLB_DID(sdev->did) |
+			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+			QI_EIOTLB_TYPE;
 		desc.qw1 = 0;
 	} else {
 		int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
 				QI_EIOTLB_TYPE;
 		desc.qw1 = QI_EIOTLB_ADDR(address) |
-				QI_EIOTLB_GL(gl) |
 				QI_EIOTLB_IH(ih) |
 				QI_EIOTLB_AM(mask);
 	}
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 }

 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-				unsigned long pages, int ih, int gl)
+				unsigned long pages, int ih)
 {
 	struct intel_svm_dev *sdev;

 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
 	rcu_read_unlock();
 }

@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

 	intel_flush_svm_range(svm, start,
-			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
 }

 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list) {
 		intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
-		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 	}
 	rcu_read_unlock();

@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 			 * large and has to be physically contiguous. So it's
 			 * hard to be as defensive as we might like. */
 			intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 			kfree_rcu(sdev, rcu);

 			if (list_empty(&svm->devs)) {
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f2ae8a006ff8..4fc6454f7ebb 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -346,7 +346,6 @@ enum {
 #define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))

 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
 #define QI_EIOTLB_AM(am)	(((u64)am))
 #define QI_EIOTLB_PASID(pasid) 	(((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
 #define QI_RESP_INVALID		0x1
 #define QI_RESP_FAILURE		0xf

-#define QI_GRAN_ALL_ALL			0
-#define QI_GRAN_NONG_ALL		1
 #define QI_GRAN_NONG_PASID		2
 #define QI_GRAN_PSI_PASID		3

-- cgit v1.2.3
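The removal works because every remaining flush can be expressed with the non-global granularities alone. To make the descriptor encoding concrete, here is a minimal userspace sketch of how intel_flush_svm_range_dev() now packs an extended-IOTLB invalidation descriptor. The PASID, IH, and AM shifts are taken from the header diff above; the DID, GRAN, and TYPE field positions are assumptions based on include/linux/intel-iommu.h, so treat the exact constants as illustrative.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define VTD_PAGE_MASK		(~0xfffULL)		/* 4 KiB pages */
#define QI_EIOTLB_TYPE		0x6ULL			/* assumed type code */
#define QI_EIOTLB_GRAN(gran)	(((u64)(gran)) << 4)	/* assumed shift */
#define QI_EIOTLB_DID(did)	(((u64)(did)) << 16)	/* assumed shift */
#define QI_EIOTLB_ADDR(addr)	(((u64)(addr)) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((u64)(ih)) << 6)
#define QI_EIOTLB_AM(am)	((u64)(am))
#define QI_EIOTLB_PASID(pasid)	(((u64)(pasid)) << 32)

#define QI_GRAN_NONG_PASID	2	/* non-global, whole PASID */
#define QI_GRAN_PSI_PASID	3	/* page-selective within PASID */

/* ilog2(__roundup_pow_of_two(pages)) from the driver, spelled out. */
static unsigned int address_mask(unsigned long pages)
{
	unsigned int mask = 0;

	while ((1UL << mask) < pages)
		mask++;
	return mask;
}

int main(void)
{
	unsigned int pasid = 5, did = 2;
	u64 addr = 0x7f1234000ULL;
	u64 qw0, qw1;

	/* pages == -1 (or no page-selective support): flush the PASID. */
	qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
	      QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
	printf("PASID-granular: qw0=%#llx qw1=0\n", (unsigned long long)qw0);

	/* Page-selective: qw1 carries the address, IH hint and mask. */
	qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
	      QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
	qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(0) |
	      QI_EIOTLB_AM(address_mask(8));
	printf("page-selective: qw0=%#llx qw1=%#llx\n",
	       (unsigned long long)qw0, (unsigned long long)qw1);
	return 0;
}

Note that neither path needs the removed QI_EIOTLB_GL() bit or the two global granularity codes: both remaining encodings are non-global.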
From 36b7200f67dfe75b416b5281ed4ace9927b513bc Mon Sep 17 00:00:00 2001
From: Stuart Hayes
Date: Thu, 5 Sep 2019 12:09:48 -0500
Subject: iommu/amd: Flush old domains in kdump kernel

When devices are attached to the amd_iommu in a kdump kernel, the old
device table entries (DTEs), which were copied from the crashed kernel,
will be overwritten with a new domain number. When the new DTE is
written, the IOMMU is told to flush the DTE from its internal cache,
but it is not told to flush the translation cache entries for the old
domain number.

Without this patch, AMD systems using the tg3 network driver fail when
kdump tries to save the vmcore to a network system, showing network
timeouts and (sometimes) IOMMU errors in the kernel log.

This patch will flush IOMMU translation cache entries for the old
domain when a DTE gets overwritten with a new domain number.

Signed-off-by: Stuart Hayes
Fixes: 3ac3e5ee5ed5 ('iommu/amd: Copy old trans table from old kernel')
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b607a92791d3..f853b96ee547 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
 	iommu_completion_wait(iommu);
 }

+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+	struct iommu_cmd cmd;
+
+	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+			      dom_id, 1);
+	iommu_queue_command(iommu, &cmd);
+
+	iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_all(struct amd_iommu *iommu)
 {
 	struct iommu_cmd cmd;
@@ -1873,6 +1884,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 {
 	u64 pte_root = 0;
 	u64 flags = 0;
+	u32 old_domid;

 	if (domain->mode != PAGE_MODE_NONE)
 		pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1934,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 	flags &= ~DEV_DOMID_MASK;
 	flags |= domain->id;

+	old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
 	amd_iommu_dev_table[devid].data[1]  = flags;
 	amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+	/*
+	 * A kdump kernel might be replacing a domain ID that was copied from
+	 * the previous kernel--if so, it needs to flush the translation cache
+	 * entries for the old domain ID that is being overwritten
+	 */
+	if (old_domid) {
+		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+		amd_iommu_flush_tlb_domid(iommu, old_domid);
+	}
 }

 static void clear_dte_entry(u16 devid)
-- cgit v1.2.3
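The essence of the fix is a read-before-overwrite on the device table entry, followed by a conditional flush of whatever domain ID the previous kernel left behind. Here is a minimal userspace model of that sequence, with invented toy_* types standing in for the AMD IOMMU structures; only DEV_DOMID_MASK and the data[] layout follow the patch above.

#include <stdint.h>
#include <stdio.h>

#define DEV_DOMID_MASK	0xffffULL	/* domid lives in the low bits of data[1] */

struct toy_dte {
	uint64_t data[2];
};

/* Stand-in for amd_iommu_flush_tlb_domid(): drop every IOMMU TLB entry
 * tagged with dom_id. */
static void toy_flush_tlb_domid(uint32_t dom_id)
{
	printf("INVALIDATE_IOMMU_PAGES, domain %u, all addresses\n",
	       (unsigned)dom_id);
}

static void toy_set_dte_entry(struct toy_dte *dte, uint64_t pte_root,
			      uint64_t flags, uint16_t new_domid)
{
	/* Read the domid the previous (crashed) kernel programmed
	 * *before* clobbering it. */
	uint32_t old_domid = dte->data[1] & DEV_DOMID_MASK;

	dte->data[1] = (flags & ~DEV_DOMID_MASK) | new_domid;
	dte->data[0] = pte_root;

	/* Without this flush, translations cached under the old domid
	 * would keep serving the device in the kdump kernel. */
	if (old_domid)
		toy_flush_tlb_domid(old_domid);
}

int main(void)
{
	/* DTE copied from the crashed kernel; domain 7 may still have
	 * stale entries in the translation cache. */
	struct toy_dte dte = { .data = { 0, 7 } };

	toy_set_dte_entry(&dte, 0x1000, 0, 42);
	return 0;
}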
From 754265bcab78a9014f0f99cd35e0d610fcd7dfa7 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Fri, 6 Sep 2019 10:39:54 +0200
Subject: iommu/amd: Fix race in increase_address_space()

After the conversion to lock-less dma-api calls, the
increase_address_space() function can be called without any locking.
Multiple CPUs could potentially race to increase the address space,
leading to invalid domain->mode settings and invalid page-tables. This
has been happening in the wild under high IO load and memory pressure.

Fix the race by locking this operation. The function is called
infrequently, so this does not reintroduce a performance regression in
the dma-api path.

Reported-by: Qian Cai
Fixes: 256e4621c21a ('iommu/amd: Make use of the generic IOVA allocator')
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index f853b96ee547..61de81965c44 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1435,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
*/ -static bool increase_address_space(struct protection_domain *domain, +static void increase_address_space(struct protection_domain *domain, gfp_t gfp) { + unsigned long flags; u64 *pte; - if (domain->mode == PAGE_MODE_6_LEVEL) + spin_lock_irqsave(&domain->lock, flags); + + if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) /* address space already 64 bit large */ - return false; + goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) - return false; + goto out; *pte = PM_LEVEL_PDE(domain->mode, iommu_virt_to_phys(domain->pt_root)); @@ -1454,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain, domain->mode += 1; domain->updated = true; - return true; +out: + spin_unlock_irqrestore(&domain->lock, flags); + + return; } static u64 *alloc_pte(struct protection_domain *domain, -- cgit v1.2.3
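The pattern applied above, taking the domain lock around a rarely executed grow operation and re-checking the terminal condition under it, can be demonstrated outside the kernel. In the pthread-based sketch below, the toy_domain type is invented and calloc() stands in for get_zeroed_page(); only the locking pattern mirrors increase_address_space().

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_MODE_6_LEVEL 6

struct toy_domain {
	pthread_mutex_t lock;
	int mode;		/* number of page-table levels */
	void *pt_root;		/* top-level page table */
};

static void increase_address_space(struct toy_domain *d)
{
	void *pte;

	pthread_mutex_lock(&d->lock);

	/* Re-check under the lock: a racing caller may already have
	 * grown the table since we decided to call this function. */
	if (d->mode == PAGE_MODE_6_LEVEL)
		goto out;

	pte = calloc(1, 4096);	/* get_zeroed_page() stand-in */
	if (!pte)
		goto out;

	/* The new top level points at the old root; publish both fields
	 * only while holding the lock, so no caller ever sees a mode
	 * that disagrees with pt_root. */
	*(void **)pte = d->pt_root;
	d->pt_root = pte;
	d->mode += 1;
out:
	pthread_mutex_unlock(&d->lock);
}

static void *grower(void *arg)
{
	increase_address_space(arg);
	return NULL;
}

int main(void)
{
	struct toy_domain d = { PTHREAD_MUTEX_INITIALIZER, 3, NULL };
	pthread_t a, b;

	/* Two CPUs racing to grow the same domain: each call adds at
	 * most one level, and mode/pt_root always stay consistent. */
	pthread_create(&a, NULL, grower, &d);
	pthread_create(&b, NULL, grower, &d);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("final mode: %d\n", d.mode);
	return 0;
}

Without the lock, both threads could read the same mode, allocate two new roots, and store them in either order, which is the page-table corruption the fix prevents.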