author     Joerg Roedel <joerg.roedel@amd.com>  2009-11-20 17:02:44 +0100
committer  Joerg Roedel <joerg.roedel@amd.com>  2009-11-27 14:16:28 +0100
commit     09b4280439ef6fdc55f1353a9135034336eb5d26 (patch)
tree       835ac6e2b3c488f8674a7fa6d67a1cb9f415d834 /arch/x86/kernel/amd_iommu.c
parent     e3306664eb307ae4cc93211cd9f12d0dbd49de65 (diff)
x86/amd-iommu: Reimplement flush_all_domains_on_iommu()
This patch reimplements the function flush_all_domains_on_iommu() to use the
global protection domain list instead of scanning the domain-ID allocation
bitmap.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
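[Editorial note: the core of the change is replacing a scan over every possible domain ID (test_bit() on amd_iommu_pd_alloc_bitmap) with a walk of the global protection domain list that skips domains with no devices behind the IOMMU at hand. The following is a minimal user-space model of that walk, not part of the commit; the struct layout, the pthread mutexes standing in for spinlocks, and the printf standing in for queueing the invalidation command are all illustrative assumptions.]

/* Stand-alone model of the new walk (not kernel code): a singly linked
 * list models amd_iommu_pd_list, pthread mutexes model the spinlocks,
 * and printf models queueing INVALIDATE_IOMMU_PAGES. */
#include <stdio.h>
#include <pthread.h>

#define MAX_IOMMUS 4

struct protection_domain {
	int id;
	int dev_iommu[MAX_IOMMUS];	/* per-IOMMU device count, as in the patch */
	pthread_mutex_t lock;
	struct protection_domain *next;	/* stand-in for the kernel list_head */
};

static struct protection_domain *pd_list;	/* models amd_iommu_pd_list */
static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER;	/* models amd_iommu_pd_lock */

static void flush_all_domains_on_iommu(int iommu_index)
{
	struct protection_domain *d;

	pthread_mutex_lock(&pd_lock);		/* outer lock: the global list */
	for (d = pd_list; d; d = d->next) {
		if (d->dev_iommu[iommu_index] == 0)
			continue;		/* no devices behind this IOMMU */

		pthread_mutex_lock(&d->lock);	/* inner lock: per-domain state */
		printf("flush domain %d on iommu %d\n", d->id, iommu_index);
		pthread_mutex_unlock(&d->lock);
	}
	pthread_mutex_unlock(&pd_lock);
}

int main(void)
{
	static struct protection_domain d2 = {
		.id = 2, .lock = PTHREAD_MUTEX_INITIALIZER
	};
	static struct protection_domain d1 = {
		.id = 1, .dev_iommu = { [0] = 2 },
		.lock = PTHREAD_MUTEX_INITIALIZER, .next = &d2
	};

	pd_list = &d1;
	flush_all_domains_on_iommu(0);	/* flushes only domain 1 */
	return 0;
}

[The old bitmap scan touched every possible domain ID; the list walk visits only allocated domains and consults dev_iommu[] to skip those with nothing to flush on this IOMMU.]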
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  |  43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 5141f5608c5c..a1bd99d390ab 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -499,43 +499,48 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
 }
 
 /*
- * This function flushes one domain on one IOMMU
+ * This function flushes all domains that have devices on the given IOMMU
  */
-static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
 {
-	struct iommu_cmd cmd;
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+	struct protection_domain *domain;
 	unsigned long flags;
 
-	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-				      domid, 1, 1);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	__iommu_queue_command(iommu, &cmd);
-	__iommu_completion_wait(iommu);
-	__iommu_wait_for_completion(iommu);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
-{
-	int i;
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
-	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+		if (domain->dev_iommu[iommu->index] == 0)
 			continue;
-		flush_domain_on_iommu(iommu, i);
+
+		spin_lock(&domain->lock);
+		iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+		iommu_flush_complete(domain);
+		spin_unlock(&domain->lock);
 	}
 
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
 void amd_iommu_flush_all_domains(void)
 {
 	struct protection_domain *domain;
+	unsigned long flags;
+
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+		spin_lock(&domain->lock);
 		iommu_flush_tlb_pde(domain);
 		iommu_flush_complete(domain);
+		spin_unlock(&domain->lock);
 	}
+
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
 static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
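
[Editorial note on the locking the added comment alludes to: both rewritten functions take amd_iommu_pd_lock with interrupts disabled for the whole walk and nest each domain->lock inside it. Continuing the user-space sketch above (same illustrative types; iommu_flush_tlb_pde() plus iommu_flush_complete() reduced to a printf), amd_iommu_flush_all_domains() would look roughly like:]

/* Continuation of the earlier sketch: the resume-time flush visits
 * every domain on the global list, nesting the per-domain lock inside
 * the list lock. Illustrative model, not code from the commit. */
static void amd_iommu_flush_all_domains_model(void)
{
	struct protection_domain *d;

	pthread_mutex_lock(&pd_lock);		/* models spin_lock_irqsave() */
	for (d = pd_list; d; d = d->next) {
		pthread_mutex_lock(&d->lock);
		/* models iommu_flush_tlb_pde() + iommu_flush_complete() */
		printf("flush TLB and PDEs for domain %d\n", d->id);
		pthread_mutex_unlock(&d->lock);
	}
	pthread_mutex_unlock(&pd_lock);		/* models spin_unlock_irqrestore() */
}

[Holding the list lock with interrupts off across every domain is heavy, which is why the new comment stresses that the function only runs during resume, where the latency does not matter.]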