path: root/drivers/scsi/ibmvscsi/ibmvfc.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2021-03-28 11:34:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-03-28 11:34:47 -0700
commit		e8cfe8fa22b6c3d12595f68fde6ef10121795267 (patch)
tree		ce5fa0c1851a804598a734ebf07af42f2223d507 /drivers/scsi/ibmvscsi/ibmvfc.c
parent		0f4498cef9f5cd18d7c6639a2a902ec1edc5be4e (diff)
parent		36fa766faa0c822c860e636fe82b1affcd022974 (diff)
Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI fixes from James Bottomley:
 "Seven fixes, all in drivers (qla2xxx, mpt3sas, qedi, target, ibmvscsi).

  The most serious are the target pscsi oom and the qla2xxx revert which
  can otherwise cause a use after free"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: target: pscsi: Clean up after failure in pscsi_map_sg()
  scsi: target: pscsi: Avoid OOM in pscsi_map_sg()
  scsi: mpt3sas: Fix error return code of mpt3sas_base_attach()
  scsi: qedi: Fix error return code of qedi_alloc_global_queues()
  scsi: Revert "qla2xxx: Make sure that aborted commands are freed"
  scsi: ibmvfc: Make ibmvfc_wait_for_ops() MQ aware
  scsi: ibmvfc: Fix potential race in ibmvfc_wait_for_ops()
Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvfc.c')
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvfc.c	67
1 file changed, 54 insertions(+), 13 deletions(-)
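The two ibmvfc patches in this pull rework ibmvfc_wait_for_ops(): instead of walking the single CRQ's sent list, it now takes the host lock, visits every queue (the sub-CRQs when channels are in use, otherwise the lone CRQ), and inspects each queue's whole event pool, skipping events that sit on the free list. A rough, self-contained userspace sketch of that scan pattern follows; the demo_event/demo_queue types, demo_event_is_free() and demo_count_outstanding() are invented stand-ins for illustration, not the driver's real structures or locking.

/*
 * Hypothetical userspace model of the MQ-aware scan added to
 * ibmvfc_wait_for_ops(): walk every queue's whole event pool and skip
 * events that are on the free list.  All names here are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

struct demo_event {
	int tag;			/* identifies the outstanding command */
	struct demo_event *free_next;	/* linkage used only while on the free list */
};

struct demo_queue {
	struct demo_event pool[POOL_SIZE];	/* whole event pool */
	struct demo_event *free;		/* head of the free list */
};

/* Mirrors ibmvfc_event_is_free(): linear scan of the queue's free list. */
static bool demo_event_is_free(const struct demo_queue *q,
			       const struct demo_event *evt)
{
	const struct demo_event *loop;

	for (loop = q->free; loop; loop = loop->free_next)
		if (loop == evt)
			return true;
	return false;
}

/*
 * Mirrors the new loop in ibmvfc_wait_for_ops(): visit every queue, then
 * every pool slot, and count in-flight events whose tag matches.
 * Locking is omitted in this single-threaded sketch.
 */
static int demo_count_outstanding(const struct demo_queue *queues, int q_size,
				  int match_tag)
{
	int wait = 0;

	for (int q_index = 0; q_index < q_size; q_index++)
		for (int i = 0; i < POOL_SIZE; i++) {
			const struct demo_event *evt = &queues[q_index].pool[i];

			if (!demo_event_is_free(&queues[q_index], evt) &&
			    evt->tag == match_tag)
				wait++;
		}
	return wait;
}

int main(void)
{
	struct demo_queue queues[2] = {0};

	/* Queue 0: slot 0 is in flight with tag 7; slots 1-3 are on the free list. */
	queues[0].pool[0].tag = 7;
	queues[0].pool[1].free_next = &queues[0].pool[2];
	queues[0].pool[2].free_next = &queues[0].pool[3];
	queues[0].free = &queues[0].pool[1];

	/* Queue 1: slot 0 is also in flight with tag 7; the rest are free. */
	queues[1].pool[0].tag = 7;
	queues[1].pool[1].free_next = &queues[1].pool[2];
	queues[1].pool[2].free_next = &queues[1].pool[3];
	queues[1].free = &queues[1].pool[1];

	printf("outstanding events with tag 7: %d\n",
	       demo_count_outstanding(queues, 2, 7));	/* prints 2 */
	return 0;
}

Scanning the full pool and filtering out free events is what lets the wait cover commands outstanding on any queue, rather than only those linked on one queue's sent list.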
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index f140eafc0d6a..61831f2fdb30 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2372,6 +2372,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
 }
 /**
+ * ibmvfc_event_is_free - Check if event is free or not
+ * @evt: ibmvfc event struct
+ *
+ * Returns:
+ * true / false
+ **/
+static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_event *loop_evt;
+
+	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
+		if (loop_evt == evt)
+			return true;
+
+	return false;
+}
+
+/**
  * ibmvfc_wait_for_ops - Wait for ops to complete
  * @vhost: ibmvfc host struct
  * @device: device to match (starget or sdev)
@@ -2385,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
 {
 	struct ibmvfc_event *evt;
 	DECLARE_COMPLETION_ONSTACK(comp);
-	int wait;
+	int wait, i, q_index, q_size;
 	unsigned long flags;
 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+	struct ibmvfc_queue *queues;
 	ENTER;
+	if (vhost->mq_enabled && vhost->using_channels) {
+		queues = vhost->scsi_scrqs.scrqs;
+		q_size = vhost->scsi_scrqs.active_queues;
+	} else {
+		queues = &vhost->crq;
+		q_size = 1;
+	}
+
 	do {
 		wait = 0;
-		spin_lock_irqsave(&vhost->crq.l_lock, flags);
-		list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-			if (match(evt, device)) {
-				evt->eh_comp = &comp;
-				wait++;
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		for (q_index = 0; q_index < q_size; q_index++) {
+			spin_lock(&queues[q_index].l_lock);
+			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+				evt = &queues[q_index].evt_pool.events[i];
+				if (!ibmvfc_event_is_free(evt)) {
+					if (match(evt, device)) {
+						evt->eh_comp = &comp;
+						wait++;
+					}
+				}
 			}
+			spin_unlock(&queues[q_index].l_lock);
 		}
-		spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		if (wait) {
 			timeout = wait_for_completion_timeout(&comp, timeout);
 			if (!timeout) {
 				wait = 0;
-				spin_lock_irqsave(&vhost->crq.l_lock, flags);
-				list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-					if (match(evt, device)) {
-						evt->eh_comp = NULL;
-						wait++;
+				spin_lock_irqsave(vhost->host->host_lock, flags);
+				for (q_index = 0; q_index < q_size; q_index++) {
+					spin_lock(&queues[q_index].l_lock);
+					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+						evt = &queues[q_index].evt_pool.events[i];
+						if (!ibmvfc_event_is_free(evt)) {
+							if (match(evt, device)) {
+								evt->eh_comp = NULL;
+								wait++;
+							}
+						}
 					}
+					spin_unlock(&queues[q_index].l_lock);
 				}
-				spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
 				if (wait)
 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
 				LEAVE;
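In the timeout branch above, evt->eh_comp is cleared under the same locks that set it, so a command completing after the timeout can no longer reference a completion object that has already left the waiter's stack. A loose userspace analogue of that publish/unpublish handshake, built on POSIX threads instead of the kernel's completion API (the pending_op type and its fields are invented for this sketch), might look like:

/*
 * Loose analogue of the eh_comp handshake: the waiter publishes a pointer
 * to its on-stack flag under a lock, waits with a timeout, and unpublishes
 * it under the same lock so a late completer never touches memory that has
 * gone out of scope.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct pending_op {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool *done;		/* waiter's on-stack flag, or NULL if nobody waits */
};

static struct pending_op op = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.done = NULL,
};

/* Completer: signal the waiter only if one is still registered. */
static void *completer(void *arg)
{
	(void)arg;
	sleep(1);				/* pretend the command takes a while */
	pthread_mutex_lock(&op.lock);
	if (op.done) {
		*op.done = true;
		pthread_cond_signal(&op.cond);
	}
	pthread_mutex_unlock(&op.lock);
	return NULL;
}

int main(void)
{
	bool done = false;	/* plays the role of the on-stack completion */
	struct timespec deadline;
	pthread_t thr;

	pthread_mutex_lock(&op.lock);
	op.done = &done;	/* publish, like evt->eh_comp = &comp */
	pthread_mutex_unlock(&op.lock);

	pthread_create(&thr, NULL, completer, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;	/* plays the role of IBMVFC_ABORT_WAIT_TIMEOUT */

	pthread_mutex_lock(&op.lock);
	while (!done)
		if (pthread_cond_timedwait(&op.cond, &op.lock, &deadline))
			break;	/* timed out (or failed); stop waiting */
	op.done = NULL;		/* unpublish, like evt->eh_comp = NULL on timeout */
	pthread_mutex_unlock(&op.lock);

	printf(done ? "completed\n" : "timed out\n");
	pthread_join(thr, NULL);
	return 0;
}

The point mirrored from the patch is that both publishing and unpublishing the pointer happen under the lock the completer also takes before dereferencing it.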