author:    Thomas Gleixner <tglx@linutronix.de>   2021-12-06 23:51:42 +0100
committer: Thomas Gleixner <tglx@linutronix.de>   2021-12-16 22:22:19 +0100
commit:    a80713fea3d12344e1da18f9113c74cdb3c463f1
tree:      e410c5179f84072e10b6019487f32a20908e2fff /kernel/irq
parent:    653b50c5f96918238e0b51e783b291f6e9e692f9
platform-msi: Simplify platform device MSI code
The allocation code is overly complex. It tries to keep the MSI index space
packed, which does not work once an interrupt is freed. There is no
requirement for this; the only requirement is that the MSI index is unique.
Move the MSI descriptor allocation into msi_domain_populate_irqs() and use
the Linux interrupt number as the MSI index, which fulfils the uniqueness
requirement.
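Condensed from the hunk below (error unwinding omitted), the core of the new
loop allocates one descriptor per interrupt and reuses the Linux interrupt
number as the MSI index:

        msi_lock_descs(dev);
        for (virq = virq_base; virq < virq_base + nvec; virq++) {
                desc = alloc_msi_entry(dev, 1, NULL);
                if (!desc) {
                        ret = -ENOMEM;
                        goto fail;
                }

                /* The Linux interrupt number doubles as the unique MSI index */
                desc->msi_index = virq;
                desc->irq = virq;
                list_add_tail(&desc->list, &dev->msi.data->list);

                ops->set_desc(arg, desc);
                ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
                if (ret)
                        goto fail;

                irq_set_msi_desc(virq, desc);
        }
        msi_unlock_descs(dev);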
Moving the allocation there requires locking the MSI descriptors, which
reverses the lock order of the descriptor lock versus the domain mutex
compared to the regular MSI alloc/free functions. Assign a separate lockdep
class for these MSI device domains so lockdep does not flag the inversion.
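The class assignment itself happens on the platform-MSI side, outside the
kernel/irq diffstat shown here; a minimal sketch of what such an assignment
looks like, with the key name purely illustrative:

        /* Illustrative sketch - the real assignment lives in drivers/base/platform-msi.c */
        static struct lock_class_key platform_device_msi_lock_class;

        /*
         * The descriptor mutex of these device MSI domains nests inside the
         * domain mutex, i.e. in the reverse order of the regular MSI
         * alloc/free paths. A separate lockdep class keeps lockdep from
         * reporting this as a lock order inversion.
         */
        lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);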
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nishanth Menon <nm@ti.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20211206210748.956731741@linutronix.de
Diffstat (limited to 'kernel/irq')
 kernel/irq/msi.c | 45
 1 file changed, 21 insertions(+), 24 deletions(-)
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index b511dc1a0219..09f34e17e891 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -731,43 +731,40 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
 }

 int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
-                             int virq, int nvec, msi_alloc_info_t *arg)
+                             int virq_base, int nvec, msi_alloc_info_t *arg)
 {
         struct msi_domain_info *info = domain->host_data;
         struct msi_domain_ops *ops = info->ops;
         struct msi_desc *desc;
-        int ret = 0;
+        int ret, virq;

-        for_each_msi_entry(desc, dev) {
-                /* Don't even try the multi-MSI brain damage. */
-                if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
-                        ret = -EINVAL;
-                        break;
+        msi_lock_descs(dev);
+        for (virq = virq_base; virq < virq_base + nvec; virq++) {
+                desc = alloc_msi_entry(dev, 1, NULL);
+                if (!desc) {
+                        ret = -ENOMEM;
+                        goto fail;
                 }

-                if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
-                        continue;
+                desc->msi_index = virq;
+                desc->irq = virq;
+                list_add_tail(&desc->list, &dev->msi.data->list);

                 ops->set_desc(arg, desc);
-                /* Assumes the domain mutex is held! */
-                ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
-                                                      arg);
+                ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
                 if (ret)
-                        break;
-
-                irq_set_msi_desc_off(desc->irq, 0, desc);
-        }
-
-        if (ret) {
-                /* Mop up the damage */
-                for_each_msi_entry(desc, dev) {
-                        if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
-                                continue;
+                        goto fail;

-                        irq_domain_free_irqs_common(domain, desc->irq, 1);
-                }
+                irq_set_msi_desc(virq, desc);
         }
+        msi_unlock_descs(dev);
+        return 0;

+fail:
+        for (--virq; virq >= virq_base; virq--)
+                irq_domain_free_irqs_common(domain, virq, 1);
+        msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
+        msi_unlock_descs(dev);
         return ret;
 }
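For context, a hypothetical caller sketch (the wrapper name and its signature
are illustrative, not taken from this patch) showing how a device MSI domain
hands a block of already-allocated Linux interrupt numbers to
msi_domain_populate_irqs(); the descriptor lock is taken inside that call,
which is the nesting the commit message refers to:

/*
 * Hypothetical wrapper: the device MSI domain allocates a block of Linux
 * interrupt numbers [virq_base, virq_base + nr_irqs) and asks the parent
 * domain to populate them. msi_domain_populate_irqs() takes the MSI
 * descriptor lock internally, so it nests inside the domain mutex held by
 * the irqdomain allocation path.
 */
static int example_device_domain_alloc(struct irq_domain *domain, struct device *dev,
                                       int virq_base, int nr_irqs,
                                       msi_alloc_info_t *arg)
{
        return msi_domain_populate_irqs(domain->parent, dev, virq_base, nr_irqs, arg);
}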