author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2018-03-22 16:22:40 +0100
committer  Joerg Roedel <jroedel@suse.de>                       2018-03-29 10:38:16 +0200
commit     993ca6e063a69a0c65ca42ed449b6bc1b3844151 (patch)
tree       9a503357e65658e3128f643031fa33d3ae360796 /drivers/iommu
parent     2fcc1e8ac4a8514c64f946178fc36c2e30e56a41 (diff)
iommu/amd: Drop the lock while allocating new irq remap table
The irq_remap_table is allocated while the iommu_table_lock is held with
interrupts disabled.
From looking at the call sites, all callers are in the early device
initialisation path (apic_bsp_setup(), pci_enable_device(),
pci_enable_msi()), so it makes sense to drop the lock (which also enables
interrupts) and try to allocate that memory with GFP_KERNEL instead of
GFP_ATOMIC.
Since the iommu_table_lock is dropped during the allocation, we need to
recheck whether the table exists after the lock has been reacquired. I
*think* it is impossible for the "devid" entry to appear in
irq_lookup_table while the lock is dropped, since the same device can
only be probed once. However, I check for both cases, just to be sure.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
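The patch relies on the classic "drop the lock for a sleeping allocation, then
recheck under the lock" pattern: allocate speculatively without the lock, and
if another path installed a table in the meantime, discard the speculative
allocation. Below is a minimal user-space sketch of that pattern, with
hypothetical names and a pthread mutex standing in for iommu_table_lock; it is
not the kernel code itself, which follows in the diff.

    /* User-space analogue of the lock-drop/recheck allocation pattern. */
    #include <pthread.h>
    #include <stdlib.h>

    #define NUM_DEVICES 16		/* assumes devid < NUM_DEVICES */
    #define NUM_ENTRIES 64

    struct remap_table {
    	int *entries;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct remap_table *lookup_table[NUM_DEVICES];

    static struct remap_table *alloc_table(unsigned int devid)
    {
    	struct remap_table *table, *new_table = NULL;

    	pthread_mutex_lock(&table_lock);
    	table = lookup_table[devid];
    	if (table)
    		goto out_unlock;		/* already populated */
    	pthread_mutex_unlock(&table_lock);

    	/* Sleeping allocation done without the lock held. */
    	new_table = calloc(1, sizeof(*new_table));
    	if (!new_table)
    		return NULL;
    	new_table->entries = calloc(NUM_ENTRIES, sizeof(int));
    	if (!new_table->entries) {
    		free(new_table);
    		return NULL;
    	}

    	pthread_mutex_lock(&table_lock);
    	table = lookup_table[devid];
    	if (table)
    		goto out_unlock;	/* lost the race; free ours below */

    	lookup_table[devid] = new_table;
    	table = new_table;
    	new_table = NULL;		/* ownership moved to lookup_table */

    out_unlock:
    	pthread_mutex_unlock(&table_lock);
    	if (new_table) {		/* unused speculative allocation */
    		free(new_table->entries);
    		free(new_table);
    	}
    	return table;
    }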
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c | 63
1 file changed, 45 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 11ea2d656be8..c493d345b3ef 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3617,6 +3617,30 @@ static struct irq_remap_table *get_irq_table(u16 devid)
 	return table;
 }
 
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+	struct irq_remap_table *table;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+	if (!table->table) {
+		kfree(table);
+		return NULL;
+	}
+	raw_spin_lock_init(&table->lock);
+
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	return table;
+}
+
 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 				  struct irq_remap_table *table)
 {
@@ -3628,6 +3652,7 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 static struct irq_remap_table *alloc_irq_table(u16 devid)
 {
 	struct irq_remap_table *table = NULL;
+	struct irq_remap_table *new_table = NULL;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 alias;
@@ -3646,42 +3671,44 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
 	table = irq_lookup_table[alias];
 	if (table) {
 		set_remap_table_entry(iommu, devid, table);
-		goto out;
+		goto out_wait;
 	}
+	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
 	/* Nothing there yet, allocate new irq remapping table */
-	table = kzalloc(sizeof(*table), GFP_ATOMIC);
-	if (!table)
-		goto out_unlock;
+	new_table = __alloc_irq_table();
+	if (!new_table)
+		return NULL;
 
-	/* Initialize table spin-lock */
-	raw_spin_lock_init(&table->lock);
+	spin_lock_irqsave(&iommu_table_lock, flags);
 
-	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
-	if (!table->table) {
-		kfree(table);
-		table = NULL;
+	table = irq_lookup_table[devid];
+	if (table)
 		goto out_unlock;
-	}
 
-	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-		memset(table->table, 0,
-		       MAX_IRQS_PER_TABLE * sizeof(u32));
-	else
-		memset(table->table, 0,
-		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	table = irq_lookup_table[alias];
+	if (table) {
+		set_remap_table_entry(iommu, devid, table);
+		goto out_wait;
+	}
 
+	table = new_table;
+	new_table = NULL;
 
 	set_remap_table_entry(iommu, devid, table);
 	if (devid != alias)
 		set_remap_table_entry(iommu, alias, table);
 
-out:
+out_wait:
 	iommu_completion_wait(iommu);
 
 out_unlock:
 	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
+	if (new_table) {
+		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+		kfree(new_table);
+	}
 	return table;
 }