author    Brian King <brking@linux.vnet.ibm.com>  2009-03-20 15:44:35 -0500
committer James Bottomley <James.Bottomley@HansenPartnership.com>  2009-04-03 09:22:42 -0500
commit    039a08981a49e05f09db969cdd3f38f05a5df46f (patch)
tree      b7ba49c8740a97ea4be7c7d99445ec3a224f45a7 /drivers/scsi/ibmvscsi
parent    8fe74cf053de7ad2124a894996f84fa890a81093 (diff)
[SCSI] ibmvfc: Fix dropped interrupts
This patch fixes a problem of possible dropped interrupts. Currently, the ibmvfc driver has a race condition in which the platform code clears the interrupt after ibmvfc_interrupt has run. This can result in lost interrupts and, in worst case scenarios, command timeouts. Fix this by implementing a tasklet, similar to what the ibmvscsi driver does, so that interrupt processing is no longer done in the actual interrupt handler, which eliminates the race.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  25
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h   1
2 files changed, 24 insertions(+), 2 deletions(-)
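As a rough illustration of the approach described in the commit message, the sketch below shows the generic hard-IRQ-plus-tasklet deferral pattern using hypothetical example_* names. It is an assumption-laden outline of the technique, not the ibmvfc code itself; the actual implementation appears in the diff that follows.

/*
 * Minimal sketch of deferring interrupt processing to a tasklet.
 * Hypothetical names (example_host, example_interrupt, example_tasklet);
 * the real ibmvfc changes are in the diff below.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_host {
	spinlock_t lock;
	struct tasklet_struct tasklet;
};

static irqreturn_t example_interrupt(int irq, void *dev_instance)
{
	struct example_host *host = dev_instance;
	unsigned long flags;

	/* Keep the hard IRQ handler short: mask the source and defer. */
	spin_lock_irqsave(&host->lock, flags);
	/* A VIO device would call vio_disable_interrupts() here. */
	tasklet_schedule(&host->tasklet);
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_HANDLED;
}

static void example_tasklet(unsigned long data)
{
	struct example_host *host = (struct example_host *)data;
	unsigned long flags;

	/*
	 * All queue draining happens here, outside the interrupt handler,
	 * so a late hardware acknowledge cannot race with processing.
	 */
	spin_lock_irqsave(&host->lock, flags);
	/* ... pull events off the queue, re-enable interrupts when empty ... */
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Setup in the init path, and teardown before free_irq():
 *   tasklet_init(&host->tasklet, example_tasklet, (unsigned long)host);
 *   ...
 *   tasklet_kill(&host->tasklet);
 */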
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 93d1fbe4ee5d..229e360f0220 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -640,6 +640,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
+ tasklet_kill(&vhost->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -2699,6 +2700,25 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
{
struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
+ unsigned long flags;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vio_disable_interrupts(to_vio_dev(vhost->dev));
+ tasklet_schedule(&vhost->tasklet);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ibmvfc_tasklet - Interrupt handler tasklet
+ * @data: ibmvfc host struct
+ *
+ * Returns:
+ * Nothing
+ **/
+static void ibmvfc_tasklet(void *data)
+{
+ struct ibmvfc_host *vhost = data;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
@@ -2706,7 +2726,6 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
int done = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
- vio_disable_interrupts(to_vio_dev(vhost->dev));
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
@@ -2734,7 +2753,6 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- return IRQ_HANDLED;
}
/**
@@ -3859,6 +3877,8 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
retrc = 0;
+ tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
+
if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
goto req_irq_failed;
@@ -3874,6 +3894,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
return retrc;
req_irq_failed:
+ tasklet_kill(&vhost->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b21e071b9862..70107522e3a9 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -684,6 +684,7 @@ struct ibmvfc_host {
char partition_name[97];
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
+ struct tasklet_struct tasklet;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
};