author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-11 15:14:01 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-11 15:14:01 -0700
commit    ba6d10ab8014ac10d25ca513352b6665e73b5785 (patch)
tree      3b7aaa3f2d76d0c0e9612bc87e1da45577465528 /drivers/scsi/qla2xxx/qla_nvme.c
parent    64b08df460cfdfc2b010263043a057cdd33500ed (diff)
parent    baf23eddbf2a4ba9bf2bdb342686c71a8042e39b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly an update of the usual drivers: qla2xxx, hpsa, lpfc, ufs, mpt3sas, ibmvscsi, megaraid_sas, bnx2fc and hisi_sas, as well as the removal of the osst driver (I heard from Willem privately that he would like the driver removed because all his test hardware has failed). Plus a number of minor changes, spelling fixes and other trivia.

 The big merge conflict this time around is the SPDX licence tags. Following discussion on linux-next, we believe our version to be more accurate than the one in the tree, so the resolution is to take our version for all the SPDX conflicts"

Note on the SPDX license tag conversion conflicts: the SCSI tree had done its own SPDX conversion, which in some cases conflicted with the treewide ones done by Thomas & co. In almost all cases, the conflicts were purely syntactic: the SCSI tree used the old-style SPDX tags ("GPL-2.0" and "GPL-2.0+") while the treewide conversion had used the new-style ones ("GPL-2.0-only" and "GPL-2.0-or-later"). In these cases I picked the new-style one.

In a few cases, the SPDX conversion was actually different, though. As explained by James above, and in more detail in a pre-pull-request thread:

 "The other problem is actually substantive: in the libsas code Luben Tuikov originally specified GPL v2 only by dint of stating

      * This file is licensed under GPLv2.

  in all the libsas files, but then muddied the water by quoting GPLv2 verbatim (which includes the 'or later' language). So for these files Christoph did the conversion to v2-only SPDX tags and Thomas converted to v2-or-later tags"

So in those cases, where the SPDX tag substantially mattered, I took the SCSI tree conversion of it, but then also took the opportunity to turn the old-style "GPL-2.0" into a new-style "GPL-2.0-only" tag.

Similarly, when there were whitespace differences or other differences to the comments around the copyright notices, I took the version from the SCSI tree as being the more specific conversion.

Finally, in the SPDX conversions that had no conflicts (because the treewide ones hadn't been done for those files), I just took the SCSI tree version as-is, even if it was old-style. The old-style conversions are perfectly valid, even if the "-only" and "-or-later" versions are perhaps more descriptive.
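For reference, the tag styles at issue look like this at the top of a C source file; "GPL-2.0" and "GPL-2.0+" are the old-style spellings, and "GPL-2.0-only" and "GPL-2.0-or-later" are their new-style equivalents (all four are valid SPDX license identifiers):

    // SPDX-License-Identifier: GPL-2.0
    // SPDX-License-Identifier: GPL-2.0+
    // SPDX-License-Identifier: GPL-2.0-only
    // SPDX-License-Identifier: GPL-2.0-or-later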
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (185 commits)
  scsi: qla2xxx: move IO flush to the front of NVME rport unregistration
  scsi: qla2xxx: Fix NVME cmd and LS cmd timeout race condition
  scsi: qla2xxx: on session delete, return nvme cmd
  scsi: qla2xxx: Fix kernel crash after disconnecting NVMe devices
  scsi: megaraid_sas: Update driver version to 07.710.06.00-rc1
  scsi: megaraid_sas: Introduce various Aero performance modes
  scsi: megaraid_sas: Use high IOPS queues based on IO workload
  scsi: megaraid_sas: Set affinity for high IOPS reply queues
  scsi: megaraid_sas: Enable coalescing for high IOPS queues
  scsi: megaraid_sas: Add support for High IOPS queues
  scsi: megaraid_sas: Add support for MPI toolbox commands
  scsi: megaraid_sas: Offload Aero RAID5/6 division calculations to driver
  scsi: megaraid_sas: RAID1 PCI bandwidth limit algorithm is applicable for only Ventura
  scsi: megaraid_sas: megaraid_sas: Add check for count returned by HOST_DEVICE_LIST DCMD
  scsi: megaraid_sas: Handle sequence JBOD map failure at driver level
  scsi: megaraid_sas: Don't send FPIO to RL Bypass queue
  scsi: megaraid_sas: In probe context, retry IOC INIT once if firmware is in fault
  scsi: megaraid_sas: Release Mutex lock before OCR in case of DCMD timeout
  scsi: megaraid_sas: Call disable_irq from process IRQ poll
  scsi: megaraid_sas: Remove few debug counters from IO path
  ...
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_nvme.c')
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 236
1 file changed, 141 insertions(+), 95 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 22e3fba28e51..963094b3c300 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,8 +12,6 @@
static struct nvme_fc_port_template qla_nvme_fc_transport;
-static void qla_nvme_unregister_remote_port(struct work_struct *);
-
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
struct qla_nvme_rport *rport;
@@ -38,7 +36,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
@@ -74,7 +71,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
rport = fcport->nvme_remote_port->private;
rport->fcport = fcport;
- list_add_tail(&rport->list, &vha->nvme_rport_list);
fcport->nvme_flag |= NVME_FLAG_REGISTERED;
return 0;
@@ -124,53 +120,91 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
return 0;
}
+static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_fcp_req *fd;
+ struct srb_iocb *nvme;
+ unsigned long flags;
+
+ if (!priv)
+ goto out;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ if (priv->comp_status == QLA_SUCCESS) {
+ fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ } else {
+ fd->rcv_rsplen = 0;
+ fd->transferred_length = 0;
+ }
+ fd->status = 0;
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+ fd->done(fd);
+out:
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_ls_req *fd;
+ unsigned long flags;
+
+ if (!priv)
+ goto out;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+ fd = priv->fd;
+ fd->done(fd, priv->comp_status);
+out:
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+ struct nvme_private *priv =
+ container_of(work, struct nvme_private, ls_work);
+
+ kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
+}
+
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *nvme;
- struct nvmefc_ls_req *fd;
struct nvme_private *priv;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
return;
- atomic_dec(&sp->ref_count);
-
if (res)
res = -EINVAL;
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
- priv = fd->private;
+ priv = (struct nvme_private *)sp->priv;
priv->comp_status = res;
+ INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
schedule_work(&priv->ls_work);
- /* work schedule doesn't need the sp */
- qla2x00_rel_sp(sp);
}
+/* It is assumed that the QPair lock is held. */
static void qla_nvme_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *nvme;
- struct nvmefc_fcp_req *fd;
-
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
-
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
- atomic_dec(&sp->ref_count);
-
- if (res == QLA_SUCCESS) {
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
- } else {
- fd->rcv_rsplen = 0;
- fd->transferred_length = 0;
- }
- fd->status = 0;
- fd->done(fd);
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
+ priv->comp_status = res;
+ kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
return;
}
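The hunk above is the core of the conversion: the hand-rolled atomic ref_count is replaced by a struct kref whose release callback completes the upper-layer request and frees the srb exactly once, on the final put. A minimal sketch of the pattern, using a hypothetical my_cmd object rather than the driver's actual srb/nvme_private types:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_cmd {
            struct kref ref;        /* embedded reference count */
            /* ... command payload ... */
    };

    /* Runs exactly once, when the last reference is dropped. */
    static void my_cmd_release(struct kref *kref)
    {
            struct my_cmd *cmd = container_of(kref, struct my_cmd, ref);

            /* complete the upper-layer request here, then free */
            kfree(cmd);
    }

    static struct my_cmd *my_cmd_alloc(void)
    {
            struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);

            if (cmd)
                    kref_init(&cmd->ref);   /* count starts at 1 */
            return cmd;
    }

    /* Whichever path drops the last reference triggers my_cmd_release(). */
    static void my_cmd_put(struct my_cmd *cmd)
    {
            kref_put(&cmd->ref, my_cmd_release);
    }

This is why qla_nvme_sp_done() above shrinks to recording the completion status and calling kref_put(): the release callback, not the I/O completion path, now owns the teardown.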
@@ -189,44 +223,50 @@ static void qla_nvme_abort_work(struct work_struct *work)
__func__, sp, sp->handle, fcport, fcport->deleted);
if (!ha->flags.fw_started && (fcport && fcport->deleted))
- return;
+ goto out;
if (ha->flags.host_shutting_down) {
ql_log(ql_log_info, sp->fcport->vha, 0xffff,
"%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
__func__, sp, sp->type, atomic_read(&sp->ref_count));
sp->done(sp, 0);
- return;
+ goto out;
}
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
sp, sp->handle, fcport, rval);
+
+out:
+	/* kref_get was done before the work was scheduled. */
+ kref_put(&sp->cmd_kref, sp->put_fn);
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
schedule_work(&priv->abort_work);
}
-static void qla_nvme_ls_complete(struct work_struct *work)
-{
- struct nvme_private *priv =
- container_of(work, struct nvme_private, ls_work);
- struct nvmefc_ls_req *fd = priv->fd;
-
- fd->done(fd, priv->comp_status);
-}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
@@ -240,8 +280,16 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
srb_t *sp;
+
+ if (!fcport || (fcport && fcport->deleted))
+ return rval;
+
vha = fcport->vha;
ha = vha->hw;
+
+ if (!ha->flags.fw_started)
+ return rval;
+
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
@@ -250,11 +298,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->type = SRB_NVME_LS;
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
- atomic_set(&sp->ref_count, 1);
- nvme = &sp->u.iocb_cmd;
+ sp->put_fn = qla_nvme_release_ls_cmd_kref;
+ sp->priv = (void *)priv;
priv->sp = sp;
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
priv->fd = fd;
- INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
nvme->u.nvme.desc = fd;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
@@ -271,8 +321,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2x00_rel_sp(sp);
return rval;
}
@@ -284,6 +336,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
struct nvmefc_fcp_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
schedule_work(&priv->abort_work);
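qla_nvme_ls_abort() and qla_nvme_fcp_abort() above share one idiom: look up priv->sp under priv->cmd_lock, take a reference with kref_get_unless_zero(), and only then schedule the abort work, which drops that reference when it finishes. Continuing the hypothetical my_cmd sketch from earlier, with the lock and slot supplied by the caller:

    #include <linux/spinlock.h>

    /* Take a reference iff the command is still live; the lock orders
     * this lookup against the release path clearing *slot. */
    static bool my_cmd_tryget(struct my_cmd **slot, spinlock_t *lock)
    {
            unsigned long flags;
            bool got = false;

            spin_lock_irqsave(lock, flags);
            if (*slot)
                    got = kref_get_unless_zero(&(*slot)->ref);
            spin_unlock_irqrestore(lock, flags);

            return got;     /* on true, caller must my_cmd_put() later */
    }

Without kref_get_unless_zero(), an abort racing the final completion could resurrect a command whose refcount had already hit zero; returning false makes the abort path back off instead.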
@@ -487,11 +551,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fcport = qla_rport->fcport;
- vha = fcport->vha;
-
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
+ (fcport && fcport->deleted))
return rval;
+ vha = fcport->vha;
/*
* If we know the dev is going away while the transport is still sending
* IO's return busy back to stall the IO Q. This happens when the
@@ -507,12 +571,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (!sp)
return -EBUSY;
- atomic_set(&sp->ref_count, 1);
init_waitqueue_head(&sp->nvme_ls_waitq);
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ sp->priv = (void *)priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
sp->done = qla_nvme_sp_done;
+ sp->put_fn = qla_nvme_release_fcp_cmd_kref;
sp->qpair = qpair;
sp->vha = vha;
nvme = &sp->u.iocb_cmd;
@@ -522,8 +589,10 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
return rval;
@@ -542,29 +611,16 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
fc_port_t *fcport;
- struct qla_nvme_rport *qla_rport = rport->private, *trport;
+ struct qla_nvme_rport *qla_rport = rport->private;
fcport = qla_rport->fcport;
fcport->nvme_remote_port = NULL;
fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- list_del(&qla_rport->list);
- break;
- }
- }
- complete(&fcport->nvme_del_done);
-
- if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
- INIT_WORK(&fcport->free_work, qlt_free_session_done);
- schedule_work(&fcport->free_work);
- }
-
fcport->nvme_flag &= ~NVME_FLAG_DELETING;
ql_log(ql_log_info, fcport->vha, 0x2110,
- "remoteport_delete of %p completed.\n", fcport);
+ "remoteport_delete of %p %8phN completed.\n",
+ fcport, fcport->port_name);
+ complete(&fcport->nvme_del_done);
}
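A side note on the reworked log message: "%8phN" is the kernel printk extension that dumps a fixed-size byte buffer as contiguous hex, a natural fit for the 8-byte FC world-wide port name. For example (hypothetical WWPN value):

    u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x7f, 0x30, 0xa1 };

    pr_info("remoteport %8phN\n", wwpn);    /* "remoteport 21000024ff7f30a1" */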
static struct nvme_fc_port_template qla_nvme_fc_transport = {
@@ -586,35 +642,25 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.fcprqst_priv_sz = sizeof(struct nvme_private),
};
-static void qla_nvme_unregister_remote_port(struct work_struct *work)
+void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
- struct fc_port *fcport = container_of(work, struct fc_port,
- nvme_del_work);
- struct qla_nvme_rport *qla_rport, *trport;
+ int ret;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
ql_log(ql_log_warn, NULL, 0x2112,
- "%s: unregister remoteport on %p\n",__func__, fcport);
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- ql_log(ql_log_info, fcport->vha, 0x2113,
- "%s: fcport=%p\n", __func__, fcport);
- nvme_fc_set_remoteport_devloss
- (fcport->nvme_remote_port, 0);
- init_completion(&fcport->nvme_del_done);
- if (nvme_fc_unregister_remoteport
- (fcport->nvme_remote_port))
- ql_log(ql_log_info, fcport->vha, 0x2114,
- "%s: Failed to unregister nvme_remote_port\n",
- __func__);
- wait_for_completion(&fcport->nvme_del_done);
- break;
- }
- }
+ "%s: unregister remoteport on %p %8phN\n",
+ __func__, fcport, fcport->port_name);
+
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+ init_completion(&fcport->nvme_del_done);
+ ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+ if (ret)
+ ql_log(ql_log_info, fcport->vha, 0x2114,
+ "%s: Failed to unregister nvme_remote_port (%d)\n",
+ __func__, ret);
+ wait_for_completion(&fcport->nvme_del_done);
}
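The rewritten unregister path is synchronous: it arms fcport->nvme_del_done, asks the transport to unregister the remote port, and blocks until qla_nvme_remoteport_delete() above calls complete(). A sketch of the general completion pattern, with a hypothetical start_async_teardown() standing in for nvme_fc_unregister_remoteport():

    #include <linux/completion.h>

    struct teardown_ctx {
            struct completion done;
    };

    void start_async_teardown(struct teardown_ctx *ctx);    /* hypothetical trigger */

    /* Invoked later, from the asynchronous teardown path. */
    static void teardown_done_callback(struct teardown_ctx *ctx)
    {
            complete(&ctx->done);
    }

    static void teardown_and_wait(struct teardown_ctx *ctx)
    {
            init_completion(&ctx->done);            /* arm before triggering */
            start_async_teardown(ctx);              /* ends by calling teardown_done_callback() */
            wait_for_completion(&ctx->done);        /* sleep until it fires */
    }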
void qla_nvme_delete(struct scsi_qla_host *vha)