Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_irq.c         |  10
-rw-r--r--  drivers/parisc/dino.c          |  47
-rw-r--r--  drivers/parisc/lba_pci.c       |  72
-rw-r--r--  drivers/pci/access.c           |  76
-rw-r--r--  drivers/pci/ats.c              |  17
-rw-r--r--  drivers/pci/bus.c              |  32
-rw-r--r--  drivers/pci/iov.c              |  16
-rw-r--r--  drivers/pci/msi.c              |  43
-rw-r--r--  drivers/pci/pci-driver.c       |   3
-rw-r--r--  drivers/pci/pci.c              | 153
-rw-r--r--  drivers/pci/pci.h              |  10
-rw-r--r--  drivers/pci/pcie/Kconfig       |   2
-rw-r--r--  drivers/pci/probe.c            |  68
-rw-r--r--  drivers/pci/remove.c           |  10
-rw-r--r--  drivers/pci/setup-res.c        |   6
-rw-r--r--  drivers/pnp/quirks.c           |  42
-rw-r--r--  drivers/scsi/ipr.c             |  67
-rw-r--r--  drivers/scsi/ipr.h             |   1
-rw-r--r--  drivers/uio/uio_pci_generic.c  |  78
19 files changed, 550 insertions(+), 203 deletions(-)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7f9eba9a0b02..0eefa12e648c 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -487,10 +487,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
else
link_desc[0] = '\0';
- dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
- pin_name(pin), link_desc, gsi,
- (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
+ pin_name(pin), link_desc, gsi,
+ (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
+ (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
return 0;
}
@@ -524,6 +524,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
* (e.g. PCI_UNDEFINED_IRQ).
*/
- dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
+ dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
acpi_unregister_gsi(gsi);
}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index bcd5d54b7d4d..7ff10c1e8664 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -562,19 +562,6 @@ dino_fixup_bus(struct pci_bus *bus)
/* Firmware doesn't set up card-mode dino, so we have to */
if (is_card_dino(&dino_dev->hba.dev->id)) {
dino_card_setup(bus, dino_dev->hba.base_addr);
- } else if(bus->parent == NULL) {
- /* must have a dino above it, reparent the resources
- * into the dino window */
- int i;
- struct resource *res = &dino_dev->hba.lmmio_space;
-
- bus->resource[0] = &(dino_dev->hba.io_space);
- for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
- if(res[i].flags == 0)
- break;
- bus->resource[i+1] = &res[i];
- }
-
} else if (bus->parent) {
int i;
@@ -927,6 +914,7 @@ static int __init dino_probe(struct parisc_device *dev)
const char *version = "unknown";
char *name;
int is_cujo = 0;
+ LIST_HEAD(resources);
struct pci_bus *bus;
unsigned long hpa = dev->hpa.start;
@@ -1003,26 +991,37 @@ static int __init dino_probe(struct parisc_device *dev)
dev->dev.platform_data = dino_dev;
+ pci_add_resource(&resources, &dino_dev->hba.io_space);
+ if (dino_dev->hba.lmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.lmmio_space);
+ if (dino_dev->hba.elmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.elmmio_space);
+ if (dino_dev->hba.gmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.gmmio_space);
+
/*
** It's not used to avoid chicken/egg problems
** with configuration accessor functions.
*/
- dino_dev->hba.hba_bus = bus = pci_scan_bus_parented(&dev->dev,
- dino_current_bus, &dino_cfg_ops, NULL);
-
- if(bus) {
- /* This code *depends* on scanning being single threaded
- * if it isn't, this global bus number count will fail
- */
- dino_current_bus = bus->subordinate + 1;
- pci_bus_assign_resources(bus);
- pci_bus_add_devices(bus);
- } else {
+ dino_dev->hba.hba_bus = bus = pci_create_root_bus(&dev->dev,
+ dino_current_bus, &dino_cfg_ops, NULL, &resources);
+ if (!bus) {
printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n",
dev_name(&dev->dev), dino_current_bus);
+ pci_free_resource_list(&resources);
/* increment the bus number in case of duplicates */
dino_current_bus++;
+ return 0;
}
+
+ bus->subordinate = pci_scan_child_bus(bus);
+
+ /* This code *depends* on scanning being single threaded
+ * if it isn't, this global bus number count will fail
+ */
+ dino_current_bus = bus->subordinate + 1;
+ pci_bus_assign_resources(bus);
+ pci_bus_add_devices(bus);
return 0;
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 3aeb3279c92a..d5f3d753a108 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -653,7 +653,7 @@ lba_fixup_bus(struct pci_bus *bus)
}
} else {
/* Host-PCI Bridge */
- int err, i;
+ int err;
DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
ldev->hba.io_space.name,
@@ -669,9 +669,6 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&ioport_resource, 2);
BUG();
}
- /* advertize Host bridge resources to PCI bus */
- bus->resource[0] = &(ldev->hba.io_space);
- i = 1;
if (ldev->hba.elmmio_space.start) {
err = request_resource(&iomem_resource,
@@ -685,35 +682,17 @@ lba_fixup_bus(struct pci_bus *bus)
/* lba_dump_res(&iomem_resource, 2); */
/* BUG(); */
- } else
- bus->resource[i++] = &(ldev->hba.elmmio_space);
+ }
}
-
- /* Overlaps with elmmio can (and should) fail here.
- * We will prune (or ignore) the distributed range.
- *
- * FIXME: SBA code should register all elmmio ranges first.
- * that would take care of elmmio ranges routed
- * to a different rope (already discovered) from
- * getting registered *after* LBA code has already
- * registered it's distributed lmmio range.
- */
- if (truncate_pat_collision(&iomem_resource,
- &(ldev->hba.lmmio_space))) {
-
- printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
- (long)ldev->hba.lmmio_space.start,
- (long)ldev->hba.lmmio_space.end);
- } else {
+ if (ldev->hba.lmmio_space.flags) {
err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
if (err < 0) {
printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
"lmmio_space [%lx/%lx]\n",
(long)ldev->hba.lmmio_space.start,
(long)ldev->hba.lmmio_space.end);
- } else
- bus->resource[i++] = &(ldev->hba.lmmio_space);
+ }
}
#ifdef CONFIG_64BIT
@@ -728,7 +707,6 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&iomem_resource, 2);
BUG();
}
- bus->resource[i++] = &(ldev->hba.gmmio_space);
}
#endif
@@ -1404,6 +1382,7 @@ static int __init
lba_driver_probe(struct parisc_device *dev)
{
struct lba_device *lba_dev;
+ LIST_HEAD(resources);
struct pci_bus *lba_bus;
struct pci_ops *cfg_ops;
u32 func_class;
@@ -1518,10 +1497,41 @@ lba_driver_probe(struct parisc_device *dev)
if (lba_dev->hba.bus_num.start < lba_next_bus)
lba_dev->hba.bus_num.start = lba_next_bus;
+ /* Overlaps with elmmio can (and should) fail here.
+ * We will prune (or ignore) the distributed range.
+ *
+ * FIXME: SBA code should register all elmmio ranges first.
+ * that would take care of elmmio ranges routed
+ * to a different rope (already discovered) from
+ * getting registered *after* LBA code has already
+ * registered its distributed lmmio range.
+ */
+ if (truncate_pat_collision(&iomem_resource,
+ &(lba_dev->hba.lmmio_space))) {
+ printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
+ (long)lba_dev->hba.lmmio_space.start,
+ (long)lba_dev->hba.lmmio_space.end);
+ lba_dev->hba.lmmio_space.flags = 0;
+ }
+
+ pci_add_resource(&resources, &lba_dev->hba.io_space);
+ if (lba_dev->hba.elmmio_space.start)
+ pci_add_resource(&resources, &lba_dev->hba.elmmio_space);
+ if (lba_dev->hba.lmmio_space.flags)
+ pci_add_resource(&resources, &lba_dev->hba.lmmio_space);
+ if (lba_dev->hba.gmmio_space.flags)
+ pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
+
dev->dev.platform_data = lba_dev;
lba_bus = lba_dev->hba.hba_bus =
- pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
- cfg_ops, NULL);
+ pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
+ cfg_ops, NULL, &resources);
+ if (!lba_bus) {
+ pci_free_resource_list(&resources);
+ return 0;
+ }
+
+ lba_bus->subordinate = pci_scan_child_bus(lba_bus);
/* This is in lieu of calling pci_assign_unassigned_resources() */
if (is_pdc_pat()) {
@@ -1551,10 +1561,8 @@ lba_driver_probe(struct parisc_device *dev)
lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
}
- if (lba_bus) {
- lba_next_bus = lba_bus->subordinate + 1;
- pci_bus_add_devices(lba_bus);
- }
+ lba_next_bus = lba_bus->subordinate + 1;
+ pci_bus_add_devices(lba_bus);
/* Whew! Finally done! Tell services we got this one covered. */
return 0;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index fdaa42aac7c6..2a581642c237 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -13,7 +13,7 @@
* configuration space.
*/
-static DEFINE_RAW_SPINLOCK(pci_lock);
+DEFINE_RAW_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -127,20 +127,20 @@ EXPORT_SYMBOL(pci_write_vpd);
* We have a bit per device to indicate it's blocked and a global wait queue
* for callers to sleep on until devices are unblocked.
*/
-static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
+static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
-static noinline void pci_wait_ucfg(struct pci_dev *dev)
+static noinline void pci_wait_cfg(struct pci_dev *dev)
{
DECLARE_WAITQUEUE(wait, current);
- __add_wait_queue(&pci_ucfg_wait, &wait);
+ __add_wait_queue(&pci_cfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
schedule();
raw_spin_lock_irq(&pci_lock);
- } while (dev->block_ucfg_access);
- __remove_wait_queue(&pci_ucfg_wait, &wait);
+ } while (dev->block_cfg_access);
+ __remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
@@ -153,7 +153,8 @@ int pci_user_read_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
@@ -172,7 +173,8 @@ int pci_user_write_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
raw_spin_unlock_irq(&pci_lock); \
@@ -401,36 +403,56 @@ int pci_vpd_truncate(struct pci_dev *dev, size_t size)
EXPORT_SYMBOL(pci_vpd_truncate);
/**
- * pci_block_user_cfg_access - Block userspace PCI config reads/writes
+ * pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
*
- * When user access is blocked, any reads or writes to config space will
- * sleep until access is unblocked again. We don't allow nesting of
- * block/unblock calls.
+ * When access is locked, any userspace reads or writes to config
+ * space and concurrent lock requests will sleep until access is
+ * allowed via pci_cfg_access_unlock() again.
*/
-void pci_block_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_lock(struct pci_dev *dev)
+{
+ might_sleep();
+
+ raw_spin_lock_irq(&pci_lock);
+ if (dev->block_cfg_access)
+ pci_wait_cfg(dev);
+ dev->block_cfg_access = 1;
+ raw_spin_unlock_irq(&pci_lock);
+}
+EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
+
+/**
+ * pci_cfg_access_trylock - try to lock PCI config reads/writes
+ * @dev: pci device struct
+ *
+ * Same as pci_cfg_access_lock, but will return 0 if access is
+ * already locked, 1 otherwise. This function can be used from
+ * atomic contexts.
+ */
+bool pci_cfg_access_trylock(struct pci_dev *dev)
{
unsigned long flags;
- int was_blocked;
+ bool locked = true;
raw_spin_lock_irqsave(&pci_lock, flags);
- was_blocked = dev->block_ucfg_access;
- dev->block_ucfg_access = 1;
+ if (dev->block_cfg_access)
+ locked = false;
+ else
+ dev->block_cfg_access = 1;
raw_spin_unlock_irqrestore(&pci_lock, flags);
- /* If we BUG() inside the pci_lock, we're guaranteed to hose
- * the machine */
- BUG_ON(was_blocked);
+ return locked;
}
-EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
/**
- * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
+ * pci_cfg_access_unlock - Unlock PCI config reads/writes
* @dev: pci device struct
*
- * This function allows userspace PCI config accesses to resume.
+ * This function allows PCI config accesses to resume.
*/
-void pci_unblock_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_unlock(struct pci_dev *dev)
{
unsigned long flags;
@@ -438,10 +460,10 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
- WARN_ON(!dev->block_ucfg_access);
+ WARN_ON(!dev->block_cfg_access);
- dev->block_ucfg_access = 0;
- wake_up_all(&pci_ucfg_wait);
+ dev->block_cfg_access = 0;
+ wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
-EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
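For reference, a minimal usage sketch of the renamed interfaces (not part of the patch; my_reset_device() and my_try_reset_device() are hypothetical helpers). pci_cfg_access_lock() can sleep waiting for the current holder, so it is only valid in process context; pci_cfg_access_trylock() never sleeps and is the variant atomic callers, such as the ipr timer path later in this series, are expected to use.

#include <linux/pci.h>

/* Process context: sleep until we own config access, then do the work. */
static void my_reset_device(struct pci_dev *pdev)
{
	pci_cfg_access_lock(pdev);
	/* ... poke config space / trigger the reset ... */
	pci_cfg_access_unlock(pdev);
}

/* Atomic context: never sleep; report whether we managed to take the lock. */
static bool my_try_reset_device(struct pci_dev *pdev)
{
	if (!pci_cfg_access_trylock(pdev))
		return false;	/* already locked elsewhere, retry later */
	/* ... poke config space / trigger the reset ... */
	pci_cfg_access_unlock(pdev);
	return true;
}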
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 9dd90b30f91a..95655d7c0d0b 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -128,6 +128,23 @@ void pci_disable_ats(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_disable_ats);
+void pci_restore_ats_state(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ if (!pci_ats_enabled(dev))
+ return;
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
+ BUG();
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
+
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+}
+EXPORT_SYMBOL_GPL(pci_restore_ats_state);
+
/**
* pci_ats_queue_depth - query the ATS Invalidate Queue Depth
* @dev: the PCI device
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 1e2ad92a4752..398f5d859791 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -18,6 +18,32 @@
#include "pci.h"
+void pci_add_resource(struct list_head *resources, struct resource *res)
+{
+ struct pci_bus_resource *bus_res;
+
+ bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
+ if (!bus_res) {
+ printk(KERN_ERR "PCI: can't add bus resource %pR\n", res);
+ return;
+ }
+
+ bus_res->res = res;
+ list_add_tail(&bus_res->list, resources);
+}
+EXPORT_SYMBOL(pci_add_resource);
+
+void pci_free_resource_list(struct list_head *resources)
+{
+ struct pci_bus_resource *bus_res, *tmp;
+
+ list_for_each_entry_safe(bus_res, tmp, resources, list) {
+ list_del(&bus_res->list);
+ kfree(bus_res);
+ }
+}
+EXPORT_SYMBOL(pci_free_resource_list);
+
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
unsigned int flags)
{
@@ -52,16 +78,12 @@ EXPORT_SYMBOL_GPL(pci_bus_resource_n);
void pci_bus_remove_resources(struct pci_bus *bus)
{
- struct pci_bus_resource *bus_res, *tmp;
int i;
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
bus->resource[i] = NULL;
- list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
- list_del(&bus_res->list);
- kfree(bus_res);
- }
+ pci_free_resource_list(&bus->resources);
}
/**
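The two new helpers distill the pattern the dino and lba conversions above follow: build the aperture list, hand it to pci_create_root_bus(), and free it yourself only if bus creation fails (on success the entries are moved onto the new bus). A hedged sketch of that pattern; my_pci_ops and the two window resources are placeholders, not part of the patch:

#include <linux/ioport.h>
#include <linux/pci.h>

static struct pci_ops my_pci_ops;	/* hypothetical config accessors */
static struct resource my_io_window;	/* hypothetical bridge apertures */
static struct resource my_lmmio_window;

static int my_bridge_probe(struct device *parent)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;

	pci_add_resource(&resources, &my_io_window);
	if (my_lmmio_window.flags)
		pci_add_resource(&resources, &my_lmmio_window);

	bus = pci_create_root_bus(parent, 0, &my_pci_ops, NULL, &resources);
	if (!bus) {
		/* on failure the list is still the caller's to release */
		pci_free_resource_list(&resources);
		return -ENODEV;
	}

	bus->subordinate = pci_scan_child_bus(bus);
	pci_bus_assign_resources(bus);
	pci_bus_add_devices(bus);
	return 0;
}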
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1969a3ee3058..0321fa3b4226 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -347,11 +347,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
+ pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
+
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
msleep(100);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
iov->initial = initial;
if (nr_virtfn < initial)
@@ -379,10 +381,10 @@ failed:
virtfn_remove(dev, j, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
@@ -405,10 +407,10 @@ static void sriov_disable(struct pci_dev *dev)
virtfn_remove(dev, i, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
@@ -452,7 +454,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
- pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (total > 1 && !stride))
@@ -465,7 +466,6 @@ found:
return -EIO;
pgsz &= ~(pgsz - 1);
- pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 337e16ab4a92..a825d78fd0aa 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -86,6 +86,31 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
}
#endif
+#ifndef arch_restore_msi_irqs
+# define arch_restore_msi_irqs default_restore_msi_irqs
+# define HAVE_DEFAULT_MSI_RESTORE_IRQS
+#endif
+
+#ifdef HAVE_DEFAULT_MSI_RESTORE_IRQS
+void default_restore_msi_irqs(struct pci_dev *dev, int irq)
+{
+ struct msi_desc *entry;
+
+ entry = NULL;
+ if (dev->msix_enabled) {
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (irq == entry->irq)
+ break;
+ }
+ } else if (dev->msi_enabled) {
+ entry = irq_get_msi_desc(irq);
+ }
+
+ if (entry)
+ write_msi_msg(irq, &entry->msg);
+}
+#endif
+
static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
@@ -323,8 +348,18 @@ static void free_msi_irqs(struct pci_dev *dev)
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
- kobject_del(&entry->kobj);
- kobject_put(&entry->kobj);
+
+ /*
+ * It's possible that we get into this path
+ * when populate_msi_sysfs fails, which means the entries
+ * were not registered with sysfs. In that case don't
+ * unregister them.
+ */
+ if (entry->kobj.parent) {
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ }
+
list_del(&entry->list);
kfree(entry);
}
@@ -362,7 +397,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, pos, 0);
- write_msi_msg(dev->irq, &entry->msg);
+ arch_restore_msi_irqs(dev, dev->irq);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -390,7 +425,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
- write_msi_msg(entry->irq, &entry->msg);
+ arch_restore_msi_irqs(dev, entry->irq);
msix_mask_irq(entry, entry->masked);
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 12d1e81a8abe..3623d65f8b86 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -604,7 +604,8 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
- WARN_ON(ret && drv->driver.pm);
+ WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
+ drv->name, pci_dev->vendor, pci_dev->device);
return ret;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6d4a5319148d..97fff785e97e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -88,6 +88,12 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
+/*
+ * If we set up a device for bus mastering, we need to check the latency
+ * timer as certain BIOSes forget to set it properly.
+ */
+unsigned int pcibios_max_latency = 255;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -959,6 +965,7 @@ void pci_restore_state(struct pci_dev *dev)
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
+ pci_restore_ats_state(dev);
/*
* The Base Address register should be programmed before the command
@@ -967,7 +974,7 @@ void pci_restore_state(struct pci_dev *dev)
for (i = 15; i >= 0; i--) {
pci_read_config_dword(dev, i * 4, &val);
if (val != dev->saved_config_space[i]) {
- dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
+ dev_dbg(&dev->dev, "restoring config "
"space at offset %#x (was %#x, writing %#x)\n",
i, val, (int)dev->saved_config_space[i]);
pci_write_config_dword(dev,i * 4,
@@ -1536,8 +1543,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
}
out:
- dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
- enable ? "enabled" : "disabled");
+ dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
/**
@@ -2596,6 +2602,33 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
}
/**
+ * pcibios_set_master - enable PCI bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables PCI bus-mastering for the device. This is the default
+ * implementation. Architecture specific implementations can override
+ * this if necessary.
+ */
+void __weak pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
+ if (pci_is_pcie(dev))
+ return;
+
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat < 16)
+ lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
+ else if (lat > pcibios_max_latency)
+ lat = pcibios_max_latency;
+ else
+ return;
+ dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+}
+
+/**
* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
@@ -2768,6 +2801,116 @@ pci_intx(struct pci_dev *pdev, int enable)
}
/**
+ * pci_intx_mask_supported - probe for INTx masking support
+ * @pdev: the PCI device to operate on
+ *
+ * Check if the device dev supports INTx masking via the config space
+ * command word.
+ */
+bool pci_intx_mask_supported(struct pci_dev *dev)
+{
+ bool mask_supported = false;
+ u16 orig, new;
+
+ pci_cfg_access_lock(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &orig);
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(dev, PCI_COMMAND, &new);
+
+ /*
+ * There's no way to protect against hardware bugs or detect them
+ * reliably, but as long as we know what the value should be, let's
+ * go ahead and check it.
+ */
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&dev->dev, "Command register changed from "
+ "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(dev, PCI_COMMAND, orig);
+ }
+
+ pci_cfg_access_unlock(dev);
+ return mask_supported;
+}
+EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @pdev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, mask it and
+ * return true in that case. False is returned if no interrupt was
+ * pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @pdev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, unmask it if not
+ * and return true. False is returned and the mask remains active if
+ * there was still an interrupt pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
+
+/**
* pci_msi_off - disables any msi or msix capabilities
* @dev: the PCI device to operate on
*
@@ -2965,7 +3108,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
might_sleep();
if (!probe) {
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
}
@@ -2990,7 +3133,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
done:
if (!probe) {
device_unlock(&dev->dev);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
}
return rc;
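A sketch of how a driver is expected to use the new INTx helpers (the uio_pci_generic conversion at the end of this series is the in-tree user; my_check_intx(), my_handler() and the dev_id convention here are illustrative assumptions only):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Probe time: refuse devices whose command word cannot mask INTx. */
static int my_check_intx(struct pci_dev *pdev)
{
	if (!pci_intx_mask_supported(pdev))
		return -ENODEV;
	return 0;
}

/*
 * Shared IRQ handler: claim the interrupt only if this device raised it.
 * pci_check_and_mask_intx() masks INTx when the status bit is set, so the
 * line stays quiet until deferred work calls pci_check_and_unmask_intx().
 */
static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct pci_dev *pdev = dev_id;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	return IRQ_HANDLED;
}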
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index b74084e9ca12..1009a5e88e53 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -136,6 +136,8 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
+extern raw_spinlock_t pci_lock;
+
extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
@@ -249,6 +251,14 @@ struct pci_sriov {
u8 __iomem *mstate; /* VF Migration State Array */
};
+#ifdef CONFIG_PCI_ATS
+extern void pci_restore_ats_state(struct pci_dev *dev);
+#else
+static inline void pci_restore_ats_state(struct pci_dev *dev)
+{
+}
+#endif /* CONFIG_PCI_ATS */
+
#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dc29348264c6..72962cc92e0a 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -39,7 +39,7 @@ config PCIEASPM
Power Management) and Clock Power Management. ASPM supports
state L0/L0s/L1.
- ASPM is initially set up the the firmware. With this option enabled,
+ ASPM is initially set up by the firmware. With this option enabled,
Linux can modify this state in order to disable ASPM on known-bad
hardware or configurations and enable it when known-safe.
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 04e74f485714..7cc9e2f0f47c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1522,19 +1522,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
return max;
}
-struct pci_bus * pci_create_bus(struct device *parent,
- int bus, struct pci_ops *ops, void *sysdata)
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
- int error;
+ int error, i;
struct pci_bus *b, *b2;
struct device *dev;
+ struct pci_bus_resource *bus_res, *n;
+ struct resource *res;
b = pci_alloc_bus();
if (!b)
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev){
+ if (!dev) {
kfree(b);
return NULL;
}
@@ -1577,8 +1579,20 @@ struct pci_bus * pci_create_bus(struct device *parent,
pci_create_legacy_files(b);
b->number = b->secondary = bus;
- b->resource[0] = &ioport_resource;
- b->resource[1] = &iomem_resource;
+
+ /* Add initial resources to the bus */
+ list_for_each_entry_safe(bus_res, n, resources, list)
+ list_move_tail(&bus_res->list, &b->resources);
+
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
+ else
+ printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
+
+ pci_bus_for_each_resource(b, res, i) {
+ if (res)
+ dev_info(&b->dev, "root bus resource %pR\n", res);
+ }
return b;
@@ -1594,18 +1608,58 @@ err_out:
return NULL;
}
+struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ struct pci_bus *b;
+
+ b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ if (!b)
+ return NULL;
+
+ b->subordinate = pci_scan_child_bus(b);
+ pci_bus_add_devices(b);
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_root_bus);
+
+/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
+ LIST_HEAD(resources);
struct pci_bus *b;
- b = pci_create_bus(parent, bus, ops, sysdata);
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
if (b)
b->subordinate = pci_scan_child_bus(b);
+ else
+ pci_free_resource_list(&resources);
return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
+struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
+ void *sysdata)
+{
+ LIST_HEAD(resources);
+ struct pci_bus *b;
+
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
+ if (b) {
+ b->subordinate = pci_scan_child_bus(b);
+ pci_bus_add_devices(b);
+ } else {
+ pci_free_resource_list(&resources);
+ }
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_bus);
+
#ifdef CONFIG_HOTPLUG
/**
* pci_rescan_bus - scan a PCI bus for devices.
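For host bridges that do not need to intervene between creating and populating the bus, pci_scan_root_bus() collapses the create/scan/add sequence (as open-coded by the dino and lba probes above) into a single call. A minimal sketch, again using placeholder names (my_pci_ops, my_io_window, my_mem_window) rather than anything defined by this patch:

#include <linux/ioport.h>
#include <linux/pci.h>

static struct pci_ops my_pci_ops;	/* hypothetical config accessors */
static struct resource my_io_window;	/* hypothetical apertures */
static struct resource my_mem_window;

static int my_host_probe(struct device *parent)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;

	pci_add_resource(&resources, &my_io_window);
	pci_add_resource(&resources, &my_mem_window);

	/* creates the root bus, scans it and adds the discovered devices */
	bus = pci_scan_root_bus(parent, 0, &my_pci_ops, NULL, &resources);
	if (!bus) {
		pci_free_resource_list(&resources);
		return -ENODEV;
	}
	return 0;
}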
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 7f87beed35ac..6def3624c688 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -89,9 +89,8 @@ EXPORT_SYMBOL(pci_remove_bus);
* device lists, remove the /proc entry, and notify userspace
* (/sbin/hotplug).
*/
-void pci_remove_bus_device(struct pci_dev *dev)
+static void __pci_remove_bus_device(struct pci_dev *dev)
{
- pci_stop_bus_device(dev);
if (dev->subordinate) {
struct pci_bus *b = dev->subordinate;
@@ -102,6 +101,11 @@ void pci_remove_bus_device(struct pci_dev *dev)
pci_destroy_dev(dev);
}
+void pci_remove_bus_device(struct pci_dev *dev)
+{
+ pci_stop_bus_device(dev);
+ __pci_remove_bus_device(dev);
+}
/**
* pci_remove_behind_bridge - remove all devices behind a PCI bridge
@@ -117,7 +121,7 @@ void pci_remove_behind_bridge(struct pci_dev *dev)
if (dev->subordinate)
list_for_each_safe(l, n, &dev->subordinate->devices)
- pci_remove_bus_device(pci_dev_b(l));
+ __pci_remove_bus_device(pci_dev_b(l));
}
static void pci_stop_bus_devices(struct pci_bus *bus)
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 5717509becbe..b66bfdbd21f7 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -85,9 +85,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
}
}
res->flags &= ~IORESOURCE_UNSET;
- dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
- resno, res, (unsigned long long)region.start,
- (unsigned long long)region.end);
+ dev_dbg(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
+ resno, res, (unsigned long long)region.start,
+ (unsigned long long)region.end);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index dfbd5a6cc58b..258fef272ea7 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
}
}
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd_nb.h>
+
+static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
+{
+ resource_size_t start, end;
+ struct pnp_resource *pnp_res;
+ struct resource *res;
+ struct resource mmconfig_res, *mmconfig;
+
+ mmconfig = amd_get_mmconfig_range(&mmconfig_res);
+ if (!mmconfig)
+ return;
+
+ list_for_each_entry(pnp_res, &dev->resources, list) {
+ res = &pnp_res->res;
+ if (res->end < mmconfig->start || res->start > mmconfig->end ||
+ (res->start == mmconfig->start && res->end == mmconfig->end))
+ continue;
+
+ dev_info(&dev->dev, FW_BUG
+ "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
+ res, mmconfig);
+ if (mmconfig->start < res->start) {
+ start = mmconfig->start;
+ end = res->start - 1;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ if (mmconfig->end > res->end) {
+ start = res->end + 1;
+ end = mmconfig->end;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ break;
+ }
+}
+#endif
+
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
/* PnP resources that might overlap PCI BARs */
{"PNP0c01", quirk_system_pci_resources},
{"PNP0c02", quirk_system_pci_resources},
+#ifdef CONFIG_AMD_NB
+ {"PNP0c01", quirk_amd_mmconfig_area},
+#endif
{""}
};
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fd860d952b28..67b169b7a5be 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7638,8 +7638,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
**/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
ENTER;
- pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
ipr_cmd->job_step = ipr_reset_restore_cfg_space;
LEAVE;
return IPR_RC_JOB_CONTINUE;
@@ -7660,8 +7664,6 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
int rc = PCIBIOS_SUCCESSFUL;
ENTER;
- pci_block_user_cfg_access(ioa_cfg->pdev);
-
if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
writel(IPR_UPROCI_SIS64_START_BIST,
ioa_cfg->regs.set_uproc_interrupt_reg32);
@@ -7673,7 +7675,9 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
rc = IPR_RC_JOB_RETURN;
} else {
- pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
rc = IPR_RC_JOB_CONTINUE;
}
@@ -7716,7 +7720,6 @@ static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- pci_block_user_cfg_access(pdev);
pci_set_pcie_reset_state(pdev, pcie_warm_reset);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
@@ -7725,6 +7728,56 @@ static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_block_config_access_wait - Wait for permission to block config access
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+
+ if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
+ ioa_cfg->cfg_locked = 1;
+ ipr_cmd->job_step = ioa_cfg->reset;
+ } else {
+ if (ipr_cmd->u.time_left) {
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
+ ipr_reset_start_timer(ipr_cmd,
+ IPR_CHECK_FOR_RESET_TIMEOUT);
+ } else {
+ ipr_cmd->job_step = ioa_cfg->reset;
+ dev_err(&ioa_cfg->pdev->dev,
+ "Timed out waiting to lock config access. Resetting anyway.\n");
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * ipr_reset_block_config_access - Block config access to the IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
+{
+ ipr_cmd->ioa_cfg->cfg_locked = 0;
+ ipr_cmd->job_step = ipr_reset_block_config_access_wait;
+ ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
* ipr_reset_allowed - Query whether or not IOA can be reset
* @ioa_cfg: ioa config struct
*
@@ -7763,7 +7816,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
} else {
- ipr_cmd->job_step = ioa_cfg->reset;
+ ipr_cmd->job_step = ipr_reset_block_config_access;
rc = IPR_RC_JOB_CONTINUE;
}
@@ -7796,7 +7849,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
} else {
- ipr_cmd->job_step = ioa_cfg->reset;
+ ipr_cmd->job_step = ipr_reset_block_config_access;
}
ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index ac84736c1b9c..b13f9cc12279 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1387,6 +1387,7 @@ struct ipr_ioa_cfg {
u8 msi_received:1;
u8 sis64:1;
u8 dump_timeout:1;
+ u8 cfg_locked:1;
u8 revid;
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 02bd47bdee1c..0bd08ef2b394 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -45,77 +45,12 @@ to_uio_pci_generic_dev(struct uio_info *info)
static irqreturn_t irqhandler(int irq, struct uio_info *info)
{
struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
- struct pci_dev *pdev = gdev->pdev;
- irqreturn_t ret = IRQ_NONE;
- u32 cmd_status_dword;
- u16 origcmd, newcmd, status;
-
- /* We do a single dword read to retrieve both command and status.
- * Document assumptions that make this possible. */
- BUILD_BUG_ON(PCI_COMMAND % 4);
- BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
-
- pci_block_user_cfg_access(pdev);
-
- /* Read both command and status registers in a single 32-bit operation.
- * Note: we could cache the value for command and move the status read
- * out of the lock if there was a way to get notified of user changes
- * to command register through sysfs. Should be good for shared irqs. */
- pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
- origcmd = cmd_status_dword;
- status = cmd_status_dword >> 16;
-
- /* Check interrupt status register to see whether our device
- * triggered the interrupt. */
- if (!(status & PCI_STATUS_INTERRUPT))
- goto done;
-
- /* We triggered the interrupt, disable it. */
- newcmd = origcmd | PCI_COMMAND_INTX_DISABLE;
- if (newcmd != origcmd)
- pci_write_config_word(pdev, PCI_COMMAND, newcmd);
- /* UIO core will signal the user process. */
- ret = IRQ_HANDLED;
-done:
-
- pci_unblock_user_cfg_access(pdev);
- return ret;
-}
+ if (!pci_check_and_mask_intx(gdev->pdev))
+ return IRQ_NONE;
-/* Verify that the device supports Interrupt Disable bit in command register,
- * per PCI 2.3, by flipping this bit and reading it back: this bit was readonly
- * in PCI 2.2. */
-static int __devinit verify_pci_2_3(struct pci_dev *pdev)
-{
- u16 orig, new;
- int err = 0;
-
- pci_block_user_cfg_access(pdev);
- pci_read_config_word(pdev, PCI_COMMAND, &orig);
- pci_write_config_word(pdev, PCI_COMMAND,
- orig ^ PCI_COMMAND_INTX_DISABLE);
- pci_read_config_word(pdev, PCI_COMMAND, &new);
- /* There's no way to protect against
- * hardware bugs or detect them reliably, but as long as we know
- * what the value should be, let's go ahead and check it. */
- if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
- err = -EBUSY;
- dev_err(&pdev->dev, "Command changed from 0x%x to 0x%x: "
- "driver or HW bug?\n", orig, new);
- goto err;
- }
- if (!((new ^ orig) & PCI_COMMAND_INTX_DISABLE)) {
- dev_warn(&pdev->dev, "Device does not support "
- "disabling interrupts: unable to bind.\n");
- err = -ENODEV;
- goto err;
- }
- /* Now restore the original value. */
- pci_write_config_word(pdev, PCI_COMMAND, orig);
-err:
- pci_unblock_user_cfg_access(pdev);
- return err;
+ /* UIO core will signal the user process. */
+ return IRQ_HANDLED;
}
static int __devinit probe(struct pci_dev *pdev,
@@ -138,9 +73,10 @@ static int __devinit probe(struct pci_dev *pdev,
return -ENODEV;
}
- err = verify_pci_2_3(pdev);
- if (err)
+ if (!pci_intx_mask_supported(pdev)) {
+ err = -ENODEV;
goto err_verify;
+ }
gdev = kzalloc(sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
if (!gdev) {