author    James Bottomley <JBottomley@Parallels.com>    2012-10-02 08:55:12 +0100
committer James Bottomley <JBottomley@Parallels.com>    2012-10-02 08:55:12 +0100
commit fe709ed827d370e6b0c0a9f9456da1c22bdcd118 (patch)
tree   c5a7fd72a745a5f6656a58acc9a1d277e26f9595
parent 1c4cf1d5845b59cdcbfad8e67272cf5b219ab062 (diff)
parent 0644f5393e915f13733bcc65f13195ff39aeb63e (diff)
Merge SCSI misc branch into isci-for-3.6 tag
-rw-r--r--  Documentation/kernel-parameters.txt | 3
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas | 10
-rw-r--r--  Documentation/scsi/LICENSE.qla2xxx | 2
-rw-r--r--  Documentation/scsi/LICENSE.qla4xxx | 2
-rw-r--r--  Documentation/scsi/st.txt | 6
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  drivers/ata/libata-core.c | 69
-rw-r--r--  drivers/ata/libata-eh.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 18
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 80
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 22
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 59
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 16
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 18
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c | 36
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 160
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 27
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 355
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 890
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 40
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 314
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 11
-rw-r--r--  drivers/scsi/bfa/bfa_core.c | 21
-rw-r--r--  drivers/scsi/bfa/bfa_cs.h | 4
-rw-r--r--  drivers/scsi/bfa/bfa_defs_fcs.h | 18
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h | 10
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c | 21
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 12
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c | 182
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.h | 66
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_fcpim.c | 129
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 632
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c | 466
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 24
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_modules.h | 1
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c | 95
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h | 23
-rw-r--r--  drivers/scsi/bfa/bfad.c | 236
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 46
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 39
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.h | 2
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 5
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 9
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 2
-rw-r--r--  drivers/scsi/constants.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 26
-rw-r--r--  drivers/scsi/hpsa.c | 39
-rw-r--r--  drivers/scsi/ibmvscsi/Makefile | 6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 36
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 352
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 22
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 368
-rw-r--r--  drivers/scsi/ipr.c | 234
-rw-r--r--  drivers/scsi/ipr.h | 5
-rw-r--r--  drivers/scsi/isci/host.c | 22
-rw-r--r--  drivers/scsi/isci/host.h | 2
-rw-r--r--  drivers/scsi/isci/init.c | 58
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 91
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 69
-rw-r--r--  drivers/scsi/libsas/sas_dump.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 90
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 1
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 21
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 52
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 76
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 494
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 72
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 186
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 98
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 42
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 839
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 38
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 127
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 144
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 621
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 42
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 63
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 7
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 39
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 12
-rw-r--r--  drivers/scsi/mpt2sas/Kconfig | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2.h | 14
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_init.h | 9
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 8
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_raid.h | 7
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 6
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 14
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 38
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 80
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_debug.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 59
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 147
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 376
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 30
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 166
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 49
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 39
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 565
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 261
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 437
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 224
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 153
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 770
-rw-r--r--  drivers/scsi/qla2xxx/qla_settings.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 67
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/Kconfig | 4
-rw-r--r--  drivers/scsi/qla4xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c | 1611
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.h | 283
-rw-r--r--  drivers/scsi/qla4xxx/ql4_attr.c | 26
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.c | 32
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 65
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 59
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 94
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 23
-rw-r--r--  drivers/scsi/qla4xxx/ql4_inline.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 28
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 406
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 186
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 1432
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h | 198
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 494
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 4
-rw-r--r--  drivers/scsi/scsi_debug.c | 30
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 30
-rw-r--r--  drivers/scsi/sd.c | 80
-rw-r--r--  drivers/scsi/sd.h | 2
-rw-r--r--  drivers/scsi/sd_dif.c | 25
-rw-r--r--  drivers/scsi/st.c | 416
-rw-r--r--  drivers/scsi/st.h | 5
-rw-r--r--  include/linux/libata.h | 15
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/scsi/libsas.h | 20
-rw-r--r--  include/scsi/sas_ata.h | 10
-rw-r--r--  include/scsi/scsi_bsg_fc.h | 2
-rw-r--r--  include/scsi/scsi_device.h | 4
-rw-r--r--  include/scsi/scsi_devinfo.h | 1
-rw-r--r--  include/scsi/scsi_host.h | 6
176 files changed, 13492 insertions, 4460 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ad7e2e5088c1..2cb2d81a51ab 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1350,6 +1350,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
* nohrst, nosrst, norst: suppress hard, soft
and both resets.
+ * rstonce: only attempt one reset during
+ hot-unplug link recovery
+
* dump_id: dump IDENTIFY data.
If there are multiple matching configurations changing
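
For context, rstonce is consumed through the existing libata.force grammar documented in this file, where each comma-separated entry is an optional device ID followed by a value. A hypothetical boot line (the "1.5" link ID is an illustrative assumption, not part of this patch):

    libata.force=1.5:rstonce        (limit link 1.5 to a single reset attempt)
    libata.force=rstonce            (apply to every link)
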
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 80441ab608e4..3a3079411a3d 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date : Tue. Jun 17, 2012 17:00:00 PST 2012 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford/Kashyap Desai
+Current Version : 00.00.06.18-rc1
+Old Version : 00.00.06.15-rc1
+ 1. Fix Copyright dates.
+ 2. Add throttlequeuedepth module parameter.
+ 3. Add resetwaittime module parameter.
+ 4. Move poll_aen_lock initializer.
+-------------------------------------------------------------------------------
Release Date : Mon. Mar 19, 2012 17:00:00 PST 2012 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx
index ce0fdf349a81..27a91cf43d6d 100644
--- a/Documentation/scsi/LICENSE.qla2xxx
+++ b/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2011 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
QLogic Linux FC-FCoE Driver
This program includes a device driver for Linux 3.x.
diff --git a/Documentation/scsi/LICENSE.qla4xxx b/Documentation/scsi/LICENSE.qla4xxx
index ab899591ecb7..78c169f0d7c6 100644
--- a/Documentation/scsi/LICENSE.qla4xxx
+++ b/Documentation/scsi/LICENSE.qla4xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2011 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
QLogic Linux iSCSI Driver
This program includes a device driver for Linux 3.x.
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 685bf3582abe..f346abbdd6ff 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -112,10 +112,8 @@ attempted).
MINOR NUMBERS
-The tape driver currently supports 128 drives by default. This number
-can be increased by editing st.h and recompiling the driver if
-necessary. The upper limit is 2^17 drives if 4 modes for each drive
-are used.
+The tape driver currently supports up to 2^17 drives if 4 modes for
+each drive are used.
The minor numbers consist of the following bit fields:
diff --git a/MAINTAINERS b/MAINTAINERS
index fdc0119963e7..efffdb4c7118 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1645,7 +1645,6 @@ F: drivers/bcma/
F: include/linux/bcma/
BROCADE BFA FC SCSI DRIVER
-M: Jing Huang <huangj@brocade.com>
M: Krishna C Gudipati <kgudipat@brocade.com>
L: linux-scsi@vger.kernel.org
S: Supported
@@ -3431,6 +3430,13 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
+IBM Power Virtual SCSI/FC Device Drivers
+M: Robert Jennings <rcj@linux.vnet.ibm.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/ibmvscsi/
+X: drivers/scsi/ibmvscsi/ibmvstgt.c
+
IBM ServeRAID RAID DRIVER
P: Jack Hammer
M: Dave Jeffery <ipslinux@adaptec.com>
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8e1039c8e159..abd729fc094a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5253,16 +5253,20 @@ bool ata_link_offline(struct ata_link *link)
#ifdef CONFIG_PM
static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
- int wait)
+ int *async)
{
struct ata_link *link;
unsigned long flags;
- int rc;
+ int rc = 0;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ if (async) {
+ *async = -EAGAIN;
+ return 0;
+ }
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
@@ -5271,10 +5275,10 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_lock_irqsave(ap->lock, flags);
ap->pm_mesg = mesg;
- if (wait) {
- rc = 0;
+ if (async)
+ ap->pm_result = async;
+ else
ap->pm_result = &rc;
- }
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5287,7 +5291,7 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
/* wait and check result */
- if (wait) {
+ if (!async) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
@@ -5295,9 +5299,8 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
return rc;
}
-static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
{
- struct ata_port *ap = to_ata_port(dev);
unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
@@ -5312,10 +5315,17 @@ static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
if (mesg.event == PM_EVENT_SUSPEND)
ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
- rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
+ rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
return rc;
}
+static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+{
+ struct ata_port *ap = to_ata_port(dev);
+
+ return __ata_port_suspend_common(ap, mesg, NULL);
+}
+
static int ata_port_suspend(struct device *dev)
{
if (pm_runtime_suspended(dev))
@@ -5340,16 +5350,22 @@ static int ata_port_poweroff(struct device *dev)
return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
-static int ata_port_resume_common(struct device *dev)
+static int __ata_port_resume_common(struct ata_port *ap, int *async)
{
- struct ata_port *ap = to_ata_port(dev);
int rc;
rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
- ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
return rc;
}
+static int ata_port_resume_common(struct device *dev)
+{
+ struct ata_port *ap = to_ata_port(dev);
+
+ return __ata_port_resume_common(ap, NULL);
+}
+
static int ata_port_resume(struct device *dev)
{
int rc;
@@ -5382,6 +5398,24 @@ static const struct dev_pm_ops ata_port_pm_ops = {
.runtime_idle = ata_port_runtime_idle,
};
+/* sas ports don't participate in pm runtime management of ata_ports,
+ * and need to resume ata devices at the domain level, not the per-port
+ * level. sas suspend/resume is async to allow parallel port recovery
+ * since sas has multiple ata_port instances per Scsi_Host.
+ */
+int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+{
+ return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
+
+int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+{
+ return __ata_port_resume_common(ap, async);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
+
+
/**
* ata_host_suspend - suspend host
* @host: host to suspend
@@ -5927,24 +5961,18 @@ int ata_host_start(struct ata_host *host)
}
/**
- * ata_sas_host_init - Initialize a host struct
+ * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
* @host: host to initialize
* @dev: device host is attached to
- * @flags: host flags
* @ops: port_ops
*
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
*/
-/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
- unsigned long flags, struct ata_port_operations *ops)
+ struct ata_port_operations *ops)
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->dev = dev;
- host->flags = flags;
host->ops = ops;
}
@@ -6388,6 +6416,7 @@ static int __init ata_parse_force_one(char **cur,
{ "nohrst", .lflags = ATA_LFLAG_NO_HRST },
{ "nosrst", .lflags = ATA_LFLAG_NO_SRST },
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
+ { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;
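
The async suspend/resume entry points added above hand completion back through the caller-supplied int slot instead of blocking in ata_port_wait_eh(). A minimal sketch of the intended calling pattern, assuming a sas host walking its ata_port instances (the error collection is a simplification, not code from this merge):

    int async_err = 0;
    int rc = ata_sas_port_async_suspend(ap, &async_err);
    if (rc)                 /* synchronous failure */
        return rc;
    /* -EAGAIN may be written to async_err if PM was still pending;
     * the caller collects async_err once port EH has quiesced. */
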
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7d4535e989bf..100428dde421 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2623,6 +2623,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
*/
while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
max_tries++;
+ if (link->flags & ATA_LFLAG_RST_ONCE)
+ max_tries = 1;
if (link->flags & ATA_LFLAG_NO_HRST)
hardreset = NULL;
if (link->flags & ATA_LFLAG_NO_SRST)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index d99db5623acf..fb69baa06ca8 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1666,7 +1666,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
"MEM failed\n", ioc->name);
- return r;
+ goto out_pci_disable_device;
}
if (sizeof(dma_addr_t) > 4) {
@@ -1690,8 +1690,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
- pci_release_selected_regions(pdev, ioc->bars);
- return r;
+ goto out_pci_release_region;
}
} else {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
@@ -1704,8 +1703,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
- pci_release_selected_regions(pdev, ioc->bars);
- return r;
+ goto out_pci_release_region;
}
}
@@ -1735,8 +1733,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
" memory!\n", ioc->name);
- pci_release_selected_regions(pdev, ioc->bars);
- return -EINVAL;
+ r = -EINVAL;
+ goto out_pci_release_region;
}
ioc->memmap = mem;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
@@ -1750,6 +1748,12 @@ mpt_mapresources(MPT_ADAPTER *ioc)
ioc->pio_chip = (SYSIF_REGS __iomem *)port;
return 0;
+
+out_pci_release_region:
+ pci_release_selected_regions(pdev, ioc->bars);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+ return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
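
The mptbase rework above is the kernel's standard unwind-by-goto idiom: one exit label per acquired resource, released in reverse order of acquisition. Reduced to its shape (a schematic, not driver code):

    static int setup(void)
    {
        int r;

        r = acquire_a();
        if (r)
            return r;           /* nothing to undo yet */
        r = acquire_b();
        if (r)
            goto out_release_a; /* undo only what succeeded */
        return 0;

    out_release_a:
        release_a();
        return r;
    }
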
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index aff8621de806..f6adde44f226 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
+ atomic_set(&port->units, 0);
INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index e37f04551948..f2dd3a0a39eb 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -39,19 +39,25 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
}
-static int zfcp_ccw_activate(struct ccw_device *cdev)
-
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
+ zfcp_erp_clear_adapter_status(adapter, clear);
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
- "ccresu2");
+ tag);
zfcp_erp_wait(adapter);
- flush_work(&adapter->scan_work);
+ flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
zfcp_ccw_adapter_put(adapter);
@@ -164,26 +170,34 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
- zfcp_ccw_activate(cdev);
+ zfcp_ccw_activate(cdev, 0, "ccsonl1");
+ /* scan for remote ports
+ either at the end of any successful adapter recovery
+ or only after the adapter recovery for setting a device online */
+ zfcp_fc_inverse_conditional_port_scan(adapter);
+ flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
- * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
* @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
- zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
+ zfcp_erp_set_adapter_status(adapter, set);
+ zfcp_erp_adapter_shutdown(adapter, 0, tag);
zfcp_erp_wait(adapter);
zfcp_ccw_adapter_put(adapter);
@@ -191,6 +205,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
}
/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+{
+ return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
+}
+
+/**
* zfcp_ccw_notify - ccw notify function
* @cdev: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached
@@ -207,6 +233,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
switch (event) {
case CIO_GONE:
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+ zfcp_dbf_hba_basic("ccnigo1", adapter);
+ break;
+ }
dev_warn(&cdev->dev, "The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
break;
@@ -216,6 +247,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
break;
case CIO_OPER:
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+ zfcp_dbf_hba_basic("ccniop1", adapter);
+ break;
+ }
dev_info(&cdev->dev, "The FCP device is operational again\n");
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
@@ -251,6 +287,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
zfcp_ccw_adapter_put(adapter);
}
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+ zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+ return 0;
+}
+
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+ /* trace records for thaw and final shutdown during suspend
+ can only be found in system dump until the end of suspend
+ but not after resume because it's based on the memory image
+ right after the very first suspend (freeze) callback */
+ zfcp_ccw_activate(cdev, 0, "ccthaw1");
+ return 0;
+}
+
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+ zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+ return 0;
+}
+
struct ccw_driver zfcp_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -263,7 +321,7 @@ struct ccw_driver zfcp_ccw_driver = {
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
- .freeze = zfcp_ccw_set_offline,
- .thaw = zfcp_ccw_activate,
- .restore = zfcp_ccw_activate,
+ .freeze = zfcp_ccw_suspend,
+ .thaw = zfcp_ccw_thaw,
+ .restore = zfcp_ccw_resume,
};
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index fbd8b4db6025..49b82e46629e 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, port->adapter->scsi_host) {
+ shost_for_each_device(sdev, adapter->scsi_host) {
zfcp_sdev = sdev_to_zfcp(sdev);
status = atomic_read(&zfcp_sdev->status);
if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 3c1d22097ad0..e1a8cc2526e7 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
length = min((u16)sizeof(struct qdio_buffer),
(u16)ZFCP_DBF_PAY_MAX_REC);
- while ((char *)pl[payload->counter] && payload->counter < scount) {
+ while (payload->counter < scount && (char *)pl[payload->counter]) {
memcpy(payload->data, (char *)pl[payload->counter], length);
debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
payload->counter++;
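
The reordered loop condition above relies on && short-circuiting left to right: the bounds test must run first so pl[payload->counter] is never read at or beyond scount. In miniature (illustrative only):

    while (p[i] && i < n)  /* wrong: may read p[n] before the bounds check */
    while (i < n && p[i])  /* right: index validated before the read */
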
@@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
+/**
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @adapter: pointer to struct zfcp_adapter
+ */
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
+{
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbf->hba_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_BASIC;
+
+ debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 714f087eb7a9..3ac7a4b30dd9 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
ZFCP_DBF_HBA_USS = 2,
ZFCP_DBF_HBA_BIT = 3,
+ ZFCP_DBF_HBA_BASIC = 4,
};
/**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2955e1a3deaf..1305955cbf59 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -77,6 +77,7 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
+#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
@@ -204,6 +205,7 @@ struct zfcp_port {
struct zfcp_adapter *adapter; /* adapter used to access port */
struct list_head unit_list; /* head of logical unit list */
rwlock_t unit_list_lock; /* unit list lock */
+ atomic_t units; /* zfcp_unit count */
atomic_t status; /* status of this remote port */
u64 wwnn; /* WWNN if known */
u64 wwpn; /* WWPN */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 92d3df6ac8ba..4133ab6e20f1 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1230,7 +1230,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
- queue_work(adapter->work_queue, &adapter->scan_work);
+ zfcp_fc_conditional_port_scan(adapter);
queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 36f422770ff5..1d3dd3f7d699 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
@@ -98,6 +99,8 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
+extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
+extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
/* zfcp_fsf.c */
extern struct kmem_cache *zfcp_fsf_qtcb_cache;
@@ -158,6 +161,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
extern struct attribute_group zfcp_sysfs_unit_attrs;
extern struct attribute_group zfcp_sysfs_adapter_attrs;
extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 88688a80b2c1..ff598cd68b2d 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -26,6 +26,27 @@ static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_FAB] = 0x000000,
};
+static bool no_auto_port_rescan;
+module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
+MODULE_PARM_DESC(no_auto_port_rescan,
+ "no automatic port_rescan (default off)");
+
+void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+ if (no_auto_port_rescan)
+ return;
+
+ queue_work(adapter->work_queue, &adapter->scan_work);
+}
+
+void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+ if (!no_auto_port_rescan)
+ return;
+
+ queue_work(adapter->work_queue, &adapter->scan_work);
+}
+
/**
* zfcp_fc_post_event - post event to userspace via fc_transport
* @work: work struct with enqueued events
@@ -206,7 +227,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
*(u32 *)page);
}
- queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
+ zfcp_fc_conditional_port_scan(fsf_req->adapter);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
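
With no_auto_port_rescan enabled, the scans normally queued from ERP, incoming RSCNs, and ELS notifications are skipped, while setting the device online still triggers exactly one scan via the inverse helper. A possible configuration, assuming the existing per-adapter port_rescan sysfs attribute and an example bus ID of 0.0.1900:

    modprobe zfcp no_auto_port_rescan=1
    echo 1 > /sys/bus/ccw/drivers/zfcp/0.0.1900/port_rescan
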
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e1c1efc2c5a0..c96320d79fbc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
return;
}
- zfcp_dbf_hba_fsf_uss("fssrh_2", req);
+ zfcp_dbf_hba_fsf_uss("fssrh_4", req);
switch (sr_buf->status_type) {
case FSF_STATUS_READ_PORT_CLOSED:
@@ -257,7 +257,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
zfcp_cfdc_adapter_access_changed(adapter);
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
- queue_work(adapter->work_queue, &adapter->scan_work);
+ zfcp_fc_conditional_port_scan(adapter);
break;
case FSF_STATUS_READ_CFDC_UPDATED:
zfcp_cfdc_adapter_access_changed(adapter);
@@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
}
}
+#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
+#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
+#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
+#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
+#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
+#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
+#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
+
+static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+{
+ u32 fdmi_speed = 0;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
+ fdmi_speed |= FC_PORTSPEED_1GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
+ fdmi_speed |= FC_PORTSPEED_2GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
+ fdmi_speed |= FC_PORTSPEED_4GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
+ fdmi_speed |= FC_PORTSPEED_10GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
+ fdmi_speed |= FC_PORTSPEED_8GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
+ fdmi_speed |= FC_PORTSPEED_16GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
+ fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
+ return fdmi_speed;
+}
+
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
@@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_name(shost) = nsp->fl_wwpn;
fc_host_node_name(shost) = nsp->fl_wwnn;
fc_host_port_id(shost) = ntoh24(bottom->s_id);
- fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_speed(shost) =
+ zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
adapter->hydra_version = bottom->adapter_type;
@@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
- fc_host_supported_speeds(shost) = bottom->supported_speed;
+ fc_host_supported_speeds(shost) =
+ zfcp_fsf_convert_portspeed(bottom->supported_speed);
memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
FC_FC4_LIST_SIZE);
memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
@@ -771,12 +801,14 @@ out:
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;
union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
@@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth1", req);
+ zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct scsi_device *sdev = req->data;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ zfcp_sdev = sdev_to_zfcp(sdev);
+
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED |
ZFCP_STATUS_LUN_SHARED |
@@ -1856,11 +1890,13 @@ out:
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
@@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
struct fsf_qual_latency_info *lat_in;
struct latency_cont *lat = NULL;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
+ struct zfcp_scsi_dev *zfcp_sdev;
struct zfcp_blk_drv_data blktrc;
int ticks = req->adapter->timer_ticks;
@@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
!(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+ zfcp_sdev = sdev_to_zfcp(scsi->device);
blktrc.flags |= ZFCP_BLK_LAT_VALID;
blktrc.channel_lat = lat_in->channel_lat * ticks;
blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
{
struct scsi_cmnd *scmnd = req->data;
struct scsi_device *sdev = scmnd->device;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
return;
+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:
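
Several zfcp_fsf handlers above now derive zfcp_sdev only after the request status check, since req->data must not be interpreted once the request has already failed. The enforced pattern, schematically:

    struct zfcp_scsi_dev *zfcp_sdev;

    if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
        return;                     /* bail before deriving pointers */
    zfcp_sdev = sdev_to_zfcp(sdev); /* safe only past this point */
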
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b9fffc8d94a7..50b5615848f6 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_adapter *adapter = qdio->adapter;
- struct qdio_buffer_element *sbale;
int sbal_no, sbal_idx;
- void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
- u64 req_id;
- u8 scount;
if (unlikely(qdio_err)) {
- memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
if (zfcp_adapter_multi_buffer_active(adapter)) {
+ void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+ struct qdio_buffer_element *sbale;
+ u64 req_id;
+ u8 scount;
+
+ memset(pl, 0,
+ ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
req_id = (u64) sbale->addr;
- scount = sbale->scount + 1; /* incl. signaling SBAL */
+ scount = min(sbale->scount + 1,
+ ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
+ /* incl. signaling SBAL */
for (sbal_no = 0; sbal_no < scount; sbal_no++) {
sbal_idx = (idx + sbal_no) %
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index c66af27b230b..1e0eb089dfba 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
else
retval = 0;
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) > 0) {
+ retval = -EBUSY;
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ goto out;
+ }
+ /* port is about to be removed, so no more unit_add */
+ atomic_set(&port->units, -1);
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
+ int retval;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
- if (zfcp_unit_add(port, fcp_lun))
- return -EINVAL;
+ retval = zfcp_unit_add(port, fcp_lun);
+ if (retval)
+ return retval;
return count;
}
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 3f2bff0d3aa2..1cd2b99ab256 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
- put_device(&unit->port->dev);
+ atomic_dec(&unit->port->units);
kfree(unit);
}
@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
+ int retval = 0;
+
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) == -1) {
+ /* port is already gone */
+ retval = -ENODEV;
+ goto out;
+ }
unit = zfcp_unit_find(port, fcp_lun);
if (unit) {
put_device(&unit->dev);
- return -EEXIST;
+ retval = -EEXIST;
+ goto out;
}
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
- if (!unit)
- return -ENOMEM;
+ if (!unit) {
+ retval = -ENOMEM;
+ goto out;
+ }
unit->port = port;
unit->fcp_lun = fcp_lun;
@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto out;
}
- get_device(&port->dev);
-
if (device_register(&unit->dev)) {
put_device(&unit->dev);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto out;
}
if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
device_unregister(&unit->dev);
- return -EINVAL;
+ retval = -EINVAL;
+ goto out;
}
+ atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
+
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
zfcp_unit_scsi_scan(unit);
- return 0;
+out:
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ return retval;
}
/**
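
The unit lifetime change above replaces a port device reference with a mutex-guarded counter, where -1 acts as a tombstone so zfcp_unit_add() cannot race against port removal. The protocol in outline (schematic, not a literal excerpt):

    mutex_lock(&zfcp_sysfs_port_units_mutex);
    if (atomic_read(&port->units) == -1)
        ret = -ENODEV;              /* port already being removed */
    else
        atomic_inc(&port->units);   /* unit registered under the mutex */
    mutex_unlock(&zfcp_sysfs_port_units_mutex);
    /* removal side: refuse with -EBUSY while units > 0,
     * then set units to -1 under the same mutex */
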
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index d2e9e933f7a3..07d2cb126d93 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -48,7 +48,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
}
if (sreset & BE2_SET_RESET) {
- printk(KERN_ERR "Soft Reset did not deassert\n");
+ printk(KERN_ERR DRV_NAME
+ " Soft Reset did not deassert\n");
return -EIO;
}
pconline1 = BE2_MPU_IRAM_ONLINE;
@@ -67,7 +68,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
i++;
}
if (sreset & BE2_SET_RESET) {
- printk(KERN_ERR "MPU Online Soft Reset did not deassert\n");
+ printk(KERN_ERR DRV_NAME
+ " MPU Online Soft Reset did not deassert\n");
return -EIO;
}
return 0;
@@ -93,8 +95,9 @@ int be_chk_reset_complete(struct beiscsi_hba *phba)
}
if ((status & 0x80000000) || (!num_loop)) {
- printk(KERN_ERR "Failed in be_chk_reset_complete"
- "status = 0x%x\n", status);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : Failed in be_chk_reset_complete"
+ "status = 0x%x\n", status);
return -EIO;
}
@@ -169,6 +172,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
be_dws_le_to_cpu(compl, 4);
@@ -177,9 +181,12 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
if (compl_status != MCC_STATUS_SUCCESS) {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
CQE_STATUS_EXTD_MASK;
- dev_err(&ctrl->pdev->dev,
- "error in cmd completion: status(compl/extd)=%d/%d\n",
- compl_status, extd_status);
+
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
+ compl_status, extd_status);
+
return -EBUSY;
}
return 0;
@@ -233,22 +240,29 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
{
switch (evt->port_link_status) {
case ASYNC_EVENT_LINK_DOWN:
- SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
- evt->physical_port);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BC_%d : Link Down on Physical Port %d\n",
+ evt->physical_port);
+
phba->state |= BE_ADAPTER_LINK_DOWN;
iscsi_host_for_each_session(phba->shost,
be2iscsi_fail_session);
break;
case ASYNC_EVENT_LINK_UP:
phba->state = BE_ADAPTER_UP;
- SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
- evt->physical_port);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BC_%d : Link UP on Physical Port %d\n",
+ evt->physical_port);
break;
default:
- SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
- "Physical Port %d\n",
- evt->port_link_status,
- evt->physical_port);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BC_%d : Unexpected Async Notification %d on"
+ "Physical Port %d\n",
+ evt->port_link_status,
+ evt->physical_port);
}
}
@@ -279,9 +293,11 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
beiscsi_async_link_state_process(phba,
(struct be_async_event_link_state *) compl);
else
- SE_DEBUG(DBG_LVL_1,
- " Unsupported Async Event, flags"
- " = 0x%08x\n", compl->flags);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG |
+ BEISCSI_LOG_MBOX,
+ "BC_%d : Unsupported Async Event, flags"
+ " = 0x%08x\n", compl->flags);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(ctrl, compl);
@@ -312,7 +328,10 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
udelay(100);
}
if (i == mcc_timeout) {
- dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : mccq poll timed out\n");
+
return -EBUSY;
}
return 0;
@@ -338,7 +357,11 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
break;
if (cnt > 12000000) {
- dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : mbox_db poll timed out\n");
+
return -EBUSY;
}
@@ -360,6 +383,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
struct be_mcc_mailbox *mbox = mbox_mem->va;
struct be_mcc_compl *compl = &mbox->compl;
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val |= MPU_MAILBOX_DB_HI_MASK;
@@ -368,7 +392,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
status = be_mbox_db_ready_wait(ctrl);
if (status != 0) {
- SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : be_mbox_db_ready_wait failed\n");
+
return status;
}
val = 0;
@@ -379,18 +406,27 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
status = be_mbox_db_ready_wait(ctrl);
if (status != 0) {
- SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : be_mbox_db_ready_wait failed\n");
+
return status;
}
if (be_mcc_compl_is_new(compl)) {
status = be_mcc_compl_process(ctrl, &mbox->compl);
be_mcc_compl_use(compl);
if (status) {
- SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : After be_mcc_compl_process\n");
+
return status;
}
} else {
- dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : Invalid Mailbox Completion\n");
+
return -EBUSY;
}
return 0;
@@ -436,7 +472,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
if (status)
return status;
} else {
- dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : invalid mailbox completion\n");
+
return -EBUSY;
}
return 0;
@@ -528,7 +567,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -563,10 +601,10 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
u8 *endian_check;
- SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -583,7 +621,8 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
status = be_mbox_notify(ctrl);
if (status)
- SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : be_cmd_fw_initialize Failed\n");
spin_unlock(&ctrl->mbox_lock);
return status;
@@ -596,11 +635,11 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_cq_create *req = embedded_payload(wrb);
struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
struct be_dma_mem *q_mem = &cq->dma_mem;
void *ctxt = &req->context;
int status;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -608,8 +647,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
- if (!q_mem->va)
- SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
@@ -633,8 +670,10 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
} else
- SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x\n",
- status);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
+ status);
+
spin_unlock(&ctrl->mbox_lock);
return status;
@@ -700,10 +739,14 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
u8 subsys = 0, opcode = 0;
int status;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BC_%d : In beiscsi_cmd_q_destroy "
+ "queue_type : %d\n", queue_type);
+
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -759,7 +802,6 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
- SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -830,6 +872,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
unsigned int curr_pages;
u32 internal_page_offset = 0;
@@ -860,8 +903,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
status = be_mbox_notify(ctrl);
if (status) {
- SE_DEBUG(DBG_LVL_1,
- "FW CMD to map iscsi frags failed.\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : FW CMD to map iscsi frags failed.\n");
+
goto error;
}
} while (num_pages > 0);
@@ -890,3 +934,45 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
spin_unlock(&ctrl->mbox_lock);
return status;
}
+
+/**
+ * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
+ * @phba: device priv structure instance
+ * @vlan_tag: TAG to be set
+ *
+ * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
+ *
+ * returns
+ * TAG for the MBX Cmd
+ * **/
+int be_cmd_set_vlan(struct beiscsi_hba *phba,
+ uint16_t vlan_tag)
+{
+ unsigned int tag = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_set_vlan_req *req;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
+ sizeof(*req));
+
+ req->interface_hndl = phba->interface_handle;
+ req->vlan_priority = vlan_tag;
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+
+ return tag;
+}
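
As its kerneldoc notes, be_cmd_set_vlan() returns the allocated MCC tag (0 when no tag is free) rather than a completion status; the caller is expected to wait for the firmware completion associated with that tag. A plausible call site (the waiting step is described, not named, since no wait helper appears in this hunk):

    tag = be_cmd_set_vlan(phba, vlan_tag);
    if (!tag)
        return -EBUSY;  /* MCC tag allocation failed */
    /* ...then block until the MCC completion for 'tag' arrives */
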
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index b0b36c6a145f..2c8f98df1287 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -348,6 +348,23 @@ struct be_cmd_get_boot_target_resp {
int boot_session_handle;
};
+struct be_cmd_reopen_session_req {
+ struct be_cmd_req_hdr hdr;
+#define BE_REOPEN_ALL_SESSIONS 0x00
+#define BE_REOPEN_BOOT_SESSIONS 0x01
+#define BE_REOPEN_A_SESSION 0x02
+ u16 reopen_type;
+ u16 rsvd;
+ u32 session_handle;
+} __packed;
+
+struct be_cmd_reopen_session_resp {
+ struct be_cmd_resp_hdr hdr;
+ u32 rsvd;
+ u32 session_handle;
+} __packed;
+
+
struct be_cmd_mac_query_req {
struct be_cmd_req_hdr hdr;
u8 type;
@@ -432,6 +449,12 @@ struct be_cmd_get_def_gateway_resp {
struct ip_addr_format ip_addr;
} __packed;
+#define BEISCSI_VLAN_DISABLE 0xFFFF
+struct be_cmd_set_vlan_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 vlan_priority;
+} __packed;
/******************** Create CQ ***************************/
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -671,6 +694,9 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
bool is_link_state_evt(u32 trailer);
+/* Configuration Functions */
+int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+
struct be_default_pdu_context {
u32 dw[4];
} __packed;
@@ -911,6 +937,7 @@ struct be_cmd_get_all_if_id_req {
#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
+#define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 43f35034585d..aedb0d9a9dae 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -50,21 +50,27 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
struct beiscsi_session *beiscsi_sess;
struct beiscsi_io_task *io_task;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
if (!ep) {
- SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep\n");
+ printk(KERN_ERR
+ "beiscsi_session_create: invalid ep\n");
return NULL;
}
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
shost = phba->shost;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_session_create\n");
+
if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
- shost_printk(KERN_ERR, shost, "Cannot handle %d cmds."
- "Max cmds per session supported is %d. Using %d. "
- "\n", cmds_max,
- beiscsi_ep->phba->params.wrbs_per_cxn,
- beiscsi_ep->phba->params.wrbs_per_cxn);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Cannot handle %d cmds."
+ "Max cmds per session supported is %d. Using %d."
+ "\n", cmds_max,
+ beiscsi_ep->phba->params.wrbs_per_cxn,
+ beiscsi_ep->phba->params.wrbs_per_cxn);
+
cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
}
@@ -102,7 +108,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
struct iscsi_session *sess = cls_session->dd_data;
struct beiscsi_session *beiscsi_sess = sess->dd_data;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
+ printk(KERN_INFO "In beiscsi_session_destroy\n");
pci_pool_destroy(beiscsi_sess->bhs_pool);
iscsi_session_teardown(cls_session);
}
@@ -123,11 +129,13 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
struct iscsi_session *sess;
struct beiscsi_session *beiscsi_sess;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_create ,cid"
- "from iscsi layer=%d\n", cid);
shost = iscsi_session_to_shost(cls_session);
phba = iscsi_host_priv(shost);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_create ,cid"
+ "from iscsi layer=%d\n", cid);
+
cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
if (!cls_conn)
return NULL;
@@ -154,12 +162,15 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
unsigned int cid)
{
if (phba->conn_table[cid]) {
- SE_DEBUG(DBG_LVL_1,
- "Connection table already occupied. Detected clash\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Connection table already occupied. Detected clash\n");
+
return -EINVAL;
} else {
- SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn)\n",
- cid, beiscsi_conn);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
+ cid, beiscsi_conn);
+
phba->conn_table[cid] = beiscsi_conn;
}
return 0;
@@ -184,7 +195,6 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_bind\n");
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
return -EINVAL;
@@ -195,17 +205,21 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return -EINVAL;
if (beiscsi_ep->phba != phba) {
- SE_DEBUG(DBG_LVL_8,
- "beiscsi_ep->hba=%p not equal to phba=%p\n",
- beiscsi_ep->phba, phba);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
+ beiscsi_ep->phba, phba);
+
return -EEXIST;
}
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn;
- SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d\n",
- beiscsi_conn, conn, beiscsi_ep->ep_cid);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
+ beiscsi_conn, conn, beiscsi_ep->ep_cid);
+
return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
}
@@ -219,8 +233,9 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
ISCSI_IFACE_TYPE_IPV4,
0, 0);
if (!phba->ipv4_iface) {
- shost_printk(KERN_ERR, phba->shost, "Could not "
- "create default IPv4 address.\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Could not "
+ "create default IPv4 address.\n");
return -ENODEV;
}
@@ -237,8 +252,9 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
ISCSI_IFACE_TYPE_IPV6,
0, 0);
if (!phba->ipv6_iface) {
- shost_printk(KERN_ERR, phba->shost, "Could not "
- "create default IPv6 address.\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Could not "
+ "create default IPv6 address.\n");
return -ENODEV;
}
@@ -299,12 +315,14 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
iface_ip = nla_data(nla);
break;
default:
- shost_printk(KERN_ERR, shost, "Unsupported param %d\n",
- iface_param->param);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Unsupported param %d\n",
+ iface_param->param);
}
if (!iface_ip || !iface_subnet) {
- shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : IP and Subnet Mask required\n");
return -EINVAL;
}
@@ -314,6 +332,51 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
return ret;
}
+/**
+ * beiscsi_set_vlan_tag()- Set the VLAN TAG
+ * @shost: Scsi Host for the driver instance
+ * @iface_param: Interface parameters
+ *
+ * Set the VLAN TAG for the adapter or disable
+ * the VLAN config
+ *
+ * returns
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+static int
+beiscsi_set_vlan_tag(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ int ret = 0;
+
+ /* Get the Interface Handle */
+ if (mgmt_get_all_if_id(phba)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Getting Interface Handle Failed\n");
+ return -EIO;
+ }
+
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
+ ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ ret = mgmt_set_vlan(phba,
+ *((uint16_t *)iface_param->value));
+ break;
+ default:
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BS_%d : Unkown Param Type : %d\n",
+ iface_param->param);
+ return -ENOSYS;
+ }
+ return ret;
+}
+
+
static int
beiscsi_set_ipv4(struct Scsi_Host *shost,
struct iscsi_iface_param_info *iface_param,
@@ -335,8 +398,9 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
ret = beiscsi_set_static_ip(shost, iface_param,
data, dt_len);
else
- shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n",
- iface_param->value[0]);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Invalid BOOTPROTO: %d\n",
+ iface_param->value[0]);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
@@ -349,9 +413,14 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
ret = beiscsi_set_static_ip(shost, iface_param,
data, dt_len);
break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ ret = beiscsi_set_vlan_tag(shost, iface_param);
+ break;
default:
- shost_printk(KERN_ERR, shost, "Param %d not supported\n",
- iface_param->param);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Param %d not supported\n",
+ iface_param->param);
}
return ret;
@@ -379,8 +448,9 @@ beiscsi_set_ipv6(struct Scsi_Host *shost,
ISCSI_BOOTPROTO_STATIC);
break;
default:
- shost_printk(KERN_ERR, shost, "Param %d not supported\n",
- iface_param->param);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Param %d not supported\n",
+ iface_param->param);
}
return ret;
@@ -390,6 +460,7 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
void *data, uint32_t dt_len)
{
struct iscsi_iface_param_info *iface_param = NULL;
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct nlattr *attrib;
uint32_t rm_len = dt_len;
int ret = 0 ;
@@ -404,9 +475,11 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
* BE2ISCSI only supports 1 interface
*/
if (iface_param->iface_num) {
- shost_printk(KERN_ERR, shost, "Invalid iface_num %d."
- "Only iface_num 0 is supported.\n",
- iface_param->iface_num);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Invalid iface_num %d."
+ "Only iface_num 0 is supported.\n",
+ iface_param->iface_num);
+
return -EINVAL;
}
@@ -420,9 +493,9 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
data, dt_len);
break;
default:
- shost_printk(KERN_ERR, shost,
- "Invalid iface type :%d passed\n",
- iface_param->iface_type);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Invalid iface type :%d passed\n",
+ iface_param->iface_type);
break;
}
@@ -465,6 +538,27 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
case ISCSI_NET_PARAM_IPV4_SUBNET:
len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ len = sprintf(buf, "%s\n",
+ (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ ? "Disabled" : "Enabled");
+ break;
+ case ISCSI_NET_PARAM_VLAN_ID:
+ if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ return -EINVAL;
+ else
+ len = sprintf(buf, "%d\n",
+ (if_info.vlan_priority &
+ ISCSI_MAX_VLAN_ID));
+ break;
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ return -EINVAL;
+ else
+ len = sprintf(buf, "%d\n",
+ ((if_info.vlan_priority >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY));
+ break;
default:
WARN_ON(1);
}
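
For the VLAN get path just above: the adapter reports priority and VLAN ID
packed into the single if_info.vlan_priority word in the usual 802.1Q TCI
layout (priority in bits 15-13, ID in the low bits), with BEISCSI_VLAN_DISABLE
as the "no VLAN configured" sentinel. A toy decode, assuming ISCSI_MAX_VLAN_ID
and ISCSI_MAX_VLAN_PRIORITY are the conventional 0xFFF and 0x7 masks:

#include <stdint.h>
#include <stdio.h>

#define TOY_VLAN_ID_MASK	0xFFFu	/* assumed ISCSI_MAX_VLAN_ID */
#define TOY_VLAN_PRIO_MASK	0x7u	/* assumed ISCSI_MAX_VLAN_PRIORITY */

int main(void)
{
	uint16_t vlan_priority = (5u << 13) | 100u;	/* PCP 5, VID 100 */

	printf("id=%u prio=%u\n",
	       (unsigned int)(vlan_priority & TOY_VLAN_ID_MASK),
	       (unsigned int)((vlan_priority >> 13) & TOY_VLAN_PRIO_MASK));
	return 0;	/* prints: id=100 prio=5 */
}
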
@@ -486,6 +580,9 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
case ISCSI_NET_PARAM_IPV4_SUBNET:
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
case ISCSI_NET_PARAM_IPV6_ADDR:
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_VLAN_ID:
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
len = be2iscsi_get_if_param(phba, iface, param, buf);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
@@ -518,7 +615,10 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
int len = 0;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_get_param, param= %d\n", param);
+ beiscsi_log(beiscsi_ep->phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_ep_get_param,"
+ " param= %d\n", param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -541,9 +641,14 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ struct beiscsi_hba *phba = NULL;
int ret;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param);
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_set_param,"
+ " param= %d\n", param);
+
ret = iscsi_set_param(cls_conn, param, buf, buflen);
if (ret)
return ret;
@@ -593,7 +698,9 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
tag = be_cmd_get_initname(phba);
if (!tag) {
- SE_DEBUG(DBG_LVL_1, "Getting Initiator Name Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Getting Initiator Name Failed\n");
+
return -EBUSY;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -604,9 +711,12 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : MailBox Command Failed with "
+ "status = %d extd_status = %d\n",
+ status, extd_status);
+
free_mcc_tag(&phba->ctrl, tag);
return -EAGAIN;
}
@@ -650,7 +760,9 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
tag = be_cmd_get_port_speed(phba);
if (!tag) {
- SE_DEBUG(DBG_LVL_1, "Getting Port Speed Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Getting Port Speed Failed\n");
+
return -EBUSY;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -661,9 +773,12 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : MailBox Command Failed with "
+ "status = %d extd_status = %d\n",
+ status, extd_status);
+
free_mcc_tag(&phba->ctrl, tag);
return -EAGAIN;
}
@@ -704,20 +819,24 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
struct beiscsi_hba *phba = iscsi_host_priv(shost);
int status = 0;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_get_host_param,"
+ " param= %d\n", param);
+
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
status = beiscsi_get_macaddr(buf, phba);
if (status < 0) {
- SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : beiscsi_get_macaddr Failed\n");
return status;
}
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
status = beiscsi_get_initname(buf, phba);
if (status < 0) {
- SE_DEBUG(DBG_LVL_1,
- "Retreiving Initiator Name Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Retreiving Initiator Name Failed\n");
return status;
}
break;
@@ -728,8 +847,8 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
case ISCSI_HOST_PARAM_PORT_SPEED:
status = beiscsi_get_port_speed(shost);
if (status) {
- SE_DEBUG(DBG_LVL_1,
- "Retreiving Port Speed Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Retreiving Port Speed Failed\n");
return status;
}
status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
@@ -746,7 +865,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
int rc;
if (strlen(phba->mac_address))
- return strlcpy(buf, phba->mac_address, PAGE_SIZE);
+ return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
memset(&resp, 0, sizeof(resp));
rc = mgmt_get_nic_conf(phba, &resp);
@@ -768,8 +887,12 @@ void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_hba *phba = NULL;
+
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_get_stats\n");
- SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_stats\n");
stats->txdata_octets = conn->txdata_octets;
stats->rxdata_octets = conn->rxdata_octets;
stats->dataout_pdus = conn->dataout_pdus_cnt;
@@ -829,11 +952,16 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
+ beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_start\n");
+
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep)
- SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");
+ beiscsi_log(beiscsi_conn->phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_start , no beiscsi_ep\n");
beiscsi_conn->login_in_progress = 0;
beiscsi_set_params_for_offld(beiscsi_conn, &params);
@@ -907,19 +1035,27 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
unsigned int tag, wrb_num;
int ret = -ENOMEM;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_open_conn\n");
+
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
if (beiscsi_ep->ep_cid == 0xFFFF) {
- SE_DEBUG(DBG_LVL_1, "No free cid available\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : No free cid available\n");
return ret;
}
- SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d\n",
- beiscsi_ep->ep_cid);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
+ beiscsi_ep->ep_cid);
+
phba->ep_array[beiscsi_ep->ep_cid -
phba->fw_config.iscsi_cid_start] = ep;
if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
phba->params.cxns_per_ctrl * 2)) {
- SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Failed in allocate iscsi cid\n");
goto free_ep;
}
@@ -928,9 +1064,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
sizeof(struct tcp_connect_and_offload_in),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
- SE_DEBUG(DBG_LVL_1,
- "Failed to allocate memory for mgmt_open_connection"
- "\n");
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Failed to allocate memory for"
+ " mgmt_open_connection\n");
+
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
return -ENOMEM;
}
@@ -938,9 +1076,10 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
if (!tag) {
- SE_DEBUG(DBG_LVL_1,
- "mgmt_open_connection Failed for cid=%d\n",
- beiscsi_ep->ep_cid);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_open_connection Failed for cid=%d\n",
+ beiscsi_ep->ep_cid);
+
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -953,9 +1092,12 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : mgmt_open_connection Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+
free_mcc_tag(&phba->ctrl, tag);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -968,7 +1110,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_ep = ep->dd_data;
beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
beiscsi_ep->cid_vld = 1;
- SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_open_connection Success\n");
}
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -996,18 +1139,19 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct iscsi_endpoint *ep;
int ret;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect\n");
if (shost)
phba = iscsi_host_priv(shost);
else {
ret = -ENXIO;
- SE_DEBUG(DBG_LVL_1, "shost is NULL\n");
+ printk(KERN_ERR
+ "beiscsi_ep_connect shost is NULL\n");
return ERR_PTR(ret);
}
if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
- SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : The Adapter state is Not UP\n");
return ERR_PTR(ret);
}
@@ -1022,7 +1166,8 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
beiscsi_ep->openiscsi_ep = ep;
ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking);
if (ret) {
- SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Failed in beiscsi_open_conn\n");
goto free_ep;
}
@@ -1044,7 +1189,9 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_poll\n");
+ beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_ep_poll\n");
+
if (beiscsi_ep->cid_vld == 1)
return 1;
else
@@ -1064,8 +1211,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
if (!tag) {
- SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x\n",
- beiscsi_ep->ep_cid);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : upload failed for cid 0x%x\n",
+ beiscsi_ep->ep_cid);
+
ret = -EAGAIN;
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -1086,7 +1235,8 @@ static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
if (phba->conn_table[cid])
phba->conn_table[cid] = NULL;
else {
- SE_DEBUG(DBG_LVL_8, "Connection table Not occupied.\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : Connection table Not occupied.\n");
return -EINVAL;
}
return 0;
@@ -1104,38 +1254,40 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_hba *phba;
unsigned int tag;
+ uint8_t mgmt_invalidate_flag, tcp_upload_flag;
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
- beiscsi_ep->ep_cid);
-
- if (!beiscsi_ep->conn) {
- SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect, no "
- "beiscsi_ep\n");
- return;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
+ beiscsi_ep->ep_cid);
+
+ if (beiscsi_ep->conn) {
+ beiscsi_conn = beiscsi_ep->conn;
+ iscsi_suspend_queue(beiscsi_conn->conn);
+ mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
+ tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
+ } else {
+ mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
+ tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
}
- beiscsi_conn = beiscsi_ep->conn;
- iscsi_suspend_queue(beiscsi_conn->conn);
-
- SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect ep_cid = %d\n",
- beiscsi_ep->ep_cid);
tag = mgmt_invalidate_connection(phba, beiscsi_ep,
- beiscsi_ep->ep_cid, 1,
- savecfg_flag);
+ beiscsi_ep->ep_cid,
+ mgmt_invalidate_flag,
+ savecfg_flag);
if (!tag) {
- SE_DEBUG(DBG_LVL_1,
- "mgmt_invalidate_connection Failed for cid=%d\n",
- beiscsi_ep->ep_cid);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
+ beiscsi_ep->ep_cid);
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
free_mcc_tag(&phba->ctrl, tag);
}
- beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
+ beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
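
The rewrite of beiscsi_ep_disconnect() above also fixes a leak: the old code
returned early when no connection was bound, so such an endpoint was never
invalidated or uploaded in firmware. The new code always performs the
teardown and only varies how aggressive it is. A condensed restatement of
that choice (the flag values are assumptions for illustration; the real ones
live in the driver headers):

#include <stdint.h>

/* toy stand-ins for the driver's flag macros */
#define BEISCSI_NO_RST_ISSUE		0x01	/* assumed value */
#define CONNECTION_UPLOAD_GRACEFUL	0x01	/* assumed value */
#define CONNECTION_UPLOAD_ABORT		0x02	/* assumed value */

static void choose_teardown(int have_conn, uint8_t *invalidate_flag,
			    uint8_t *upload_flag)
{
	if (have_conn) {
		/* live connection: quiesce it, permit a TCP RST,
		 * then upload gracefully */
		*invalidate_flag = (uint8_t)~BEISCSI_NO_RST_ISSUE;
		*upload_flag = CONNECTION_UPLOAD_GRACEFUL;
	} else {
		/* endpoint never bound: suppress the RST and abort */
		*invalidate_flag = BEISCSI_NO_RST_ISSUE;
		*upload_flag = CONNECTION_UPLOAD_ABORT;
	}
}
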
@@ -1152,6 +1304,9 @@ umode_t be2iscsi_attr_is_visible(int param_type, int param)
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
case ISCSI_NET_PARAM_IPV4_GW:
case ISCSI_NET_PARAM_IPV6_ADDR:
+ case ISCSI_NET_PARAM_VLAN_ID:
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
return S_IRUGO;
default:
return 0;
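
One behavioral fix in this file is easy to miss: beiscsi_get_macaddr() used
to strlcpy() the six raw MAC bytes straight into the sysfs buffer; it now
goes through sysfs_format_mac(), which emits the canonical colon-separated
form. A stand-alone, user-space approximation of that formatting (the real
helper lives in the networking core):

#include <stdio.h>

static int format_mac(char *buf, const unsigned char *addr, int len)
{
	int i, n = 0;

	for (i = 0; i < len; i++)
		n += sprintf(buf + n, "%02x%c", addr[i],
			     i == len - 1 ? '\n' : ':');
	return n;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };
	char buf[32];

	format_mac(buf, mac, 6);
	fputs(buf, stdout);	/* prints 00:90:fa:12:34:56 */
	return 0;
}
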
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0b1d99c99fd2..ff73f9500b01 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -42,6 +42,7 @@
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
+#include "be_cmds.h"
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
@@ -57,9 +58,105 @@ MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
-MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
- "contiguous memory that can be allocated."
- "Range is 16 - 128");
+MODULE_PARM_DESC(be_max_phys_size,
+ "Maximum Size (In Kilobytes) of physically contiguous "
+ "memory that can be allocated. Range is 16 - 128");
+
+#define beiscsi_disp_param(_name)\
+ssize_t \
+beiscsi_##_name##_disp(struct device *dev,\
+ struct device_attribute *attrib, char *buf) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct beiscsi_hba *phba = iscsi_host_priv(shost); \
+ uint32_t param_val; \
+ param_val = phba->attr_##_name;\
+ return snprintf(buf, PAGE_SIZE, "%d\n",\
+ param_val);\
+}
+
+#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
+int \
+beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
+{\
+ if (val >= _minval && val <= _maxval) {\
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
+ "BA_%d : beiscsi_"#_name" updated "\
+ "from 0x%x ==> 0x%x\n",\
+ phba->attr_##_name, val); \
+ phba->attr_##_name = val;\
+ return 0;\
+ } \
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
+ "BA_%d beiscsi_"#_name" attribute "\
+ "cannot be updated to 0x%x, "\
+ "range allowed is ["#_minval" - "#_maxval"]\n", val);\
+ return -EINVAL;\
+}
+
+#define beiscsi_store_param(_name) \
+ssize_t \
+beiscsi_##_name##_store(struct device *dev,\
+ struct device_attribute *attr, const char *buf,\
+ size_t count) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);\
+ uint32_t param_val = 0;\
+ if (!isdigit(buf[0]))\
+ return -EINVAL;\
+ if (sscanf(buf, "%i", &param_val) != 1)\
+ return -EINVAL;\
+ if (beiscsi_##_name##_change(phba, param_val) == 0) \
+ return strlen(buf);\
+ else \
+ return -EINVAL;\
+}
+
+#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
+int \
+beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
+{ \
+ if (val >= _minval && val <= _maxval) {\
+ phba->attr_##_name = val;\
+ return 0;\
+ } \
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
+ "BA_%d beiscsi_"#_name" attribute " \
+ "cannot be updated to 0x%x, "\
+ "range allowed is ["#_minval" - "#_maxval"]\n", val);\
+ phba->attr_##_name = _defval;\
+ return -EINVAL;\
+}
+
+#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
+static uint beiscsi_##_name = _defval;\
+module_param(beiscsi_##_name, uint, S_IRUGO);\
+MODULE_PARM_DESC(beiscsi_##_name, _descp);\
+beiscsi_disp_param(_name)\
+beiscsi_change_param(_name, _minval, _maxval, _defval)\
+beiscsi_store_param(_name)\
+beiscsi_init_param(_name, _minval, _maxval, _defval)\
+DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
+ beiscsi_##_name##_disp, beiscsi_##_name##_store)
+
+/*
+ * When new log level added update the
+ * the MAX allowed value for log_enable
+ */
+BEISCSI_RW_ATTR(log_enable, 0x00,
+ 0xFF, 0x00, "Enable logging Bit Mask\n"
+ "\t\t\t\tInitialization Events : 0x01\n"
+ "\t\t\t\tMailbox Events : 0x02\n"
+ "\t\t\t\tMiscellaneous Events : 0x04\n"
+ "\t\t\t\tError Handling : 0x08\n"
+ "\t\t\t\tIO Path Events : 0x10\n"
+ "\t\t\t\tConfiguration Path : 0x20\n");
+
+struct device_attribute *beiscsi_attrs[] = {
+ &dev_attr_beiscsi_log_enable,
+ NULL,
+};
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
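
The BEISCSI_RW_ATTR() block above stamps out, per attribute, a sysfs
show/store pair, a range-checked updater, an init helper, a module parameter,
and the device attribute itself. Expanded by hand for log_enable (a sketch of
what the preprocessor produces, not code the patch adds separately):

static uint beiscsi_log_enable = 0x00;	/* module parameter, default 0 */

ssize_t beiscsi_log_enable_disp(struct device *dev,
				struct device_attribute *attrib, char *buf);
int beiscsi_log_enable_change(struct beiscsi_hba *phba, uint32_t val);
ssize_t beiscsi_log_enable_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count);
int beiscsi_log_enable_init(struct beiscsi_hba *phba, uint32_t val);

/*
 * Run-time use then looks like this (host number hypothetical):
 *   # enable INIT (0x01) and MBOX (0x02) logging
 *   echo 0x03 > /sys/class/scsi_host/host2/beiscsi_log_enable
 *   cat /sys/class/scsi_host/host2/beiscsi_log_enable
 */
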
@@ -112,9 +209,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
sizeof(struct invalidate_commands_params_in),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
- SE_DEBUG(DBG_LVL_1,
- "Failed to allocate memory for"
- "mgmt_invalidate_icds\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+ "BM_%d : Failed to allocate memory for"
+ "mgmt_invalidate_icds\n");
return FAILED;
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
@@ -122,9 +219,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
cid, &nonemb_cmd);
if (!tag) {
- shost_printk(KERN_WARNING, phba->shost,
- "mgmt_invalidate_icds could not be"
- " submitted\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
+ "BM_%d : mgmt_invalidate_icds could not be"
+ "submitted\n");
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -188,9 +285,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
sizeof(struct invalidate_commands_params_in),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
- SE_DEBUG(DBG_LVL_1,
- "Failed to allocate memory for"
- "mgmt_invalidate_icds\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+ "BM_%d : Failed to allocate memory for"
+ "mgmt_invalidate_icds\n");
return FAILED;
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
@@ -198,9 +295,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
cid, &nonemb_cmd);
if (!tag) {
- shost_printk(KERN_WARNING, phba->shost,
- "mgmt_invalidate_icds could not be"
- " submitted\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
+ "BM_%d : mgmt_invalidate_icds could not be"
+ " submitted\n");
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return FAILED;
@@ -389,6 +486,7 @@ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
+
static struct scsi_host_template beiscsi_sht = {
.module = THIS_MODULE,
.name = "Emulex 10Gbe open-iscsi Initiator Driver",
@@ -400,6 +498,7 @@ static struct scsi_host_template beiscsi_sht = {
.eh_abort_handler = beiscsi_eh_abort,
.eh_device_reset_handler = beiscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_session_reset,
+ .shost_attrs = beiscsi_attrs,
.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
.can_queue = BE2_IO_DEPTH,
.this_id = -1,
@@ -419,8 +518,8 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
if (!shost) {
- dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
- "iscsi_host_alloc failed\n");
+ dev_err(&pcidev->dev,
+ "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
return NULL;
}
shost->dma_boundary = pcidev->dma_mask;
@@ -510,8 +609,8 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
ret = pci_enable_device(pcidev);
if (ret) {
- dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
- "failed. Returning -ENODEV\n");
+ dev_err(&pcidev->dev,
+ "beiscsi_enable_pci - enable device failed\n");
return ret;
}
@@ -576,8 +675,9 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
+ BE2_TMFS) / 512) + 1) * 512;
phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
? 1024 : phba->params.num_eq_entries;
- SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
- phba->params.num_eq_entries);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phba->params.num_eq_entries=%d\n",
+ phba->params.num_eq_entries);
phba->params.num_cq_entries =
(((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
+ BE2_TMFS) / 512) + 1) * 512;
@@ -621,8 +721,6 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
phba = pbe_eq->phba;
mcc = &phba->ctrl.mcc_obj.cq;
eqe = queue_tail_node(eq);
- if (!eqe)
- SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
num_eq_processed = 0;
@@ -667,8 +765,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
eq = &pbe_eq->q;
cq = pbe_eq->cq;
eqe = queue_tail_node(eq);
- if (!eqe)
- SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
phba = pbe_eq->phba;
num_eq_processed = 0;
@@ -743,8 +839,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
mcc = &phba->ctrl.mcc_obj.cq;
index = 0;
eqe = queue_tail_node(eq);
- if (!eqe)
- SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
num_ioeq_processed = 0;
num_mcceq_processed = 0;
@@ -842,9 +936,10 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
phba->msi_name[i],
&phwi_context->be_eq[i]);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "beiscsi_init_irqs-Failed to"
- "register msix for i = %d\n", i);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_init_irqs-Failed to"
+ "register msix for i = %d\n",
+ i);
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@@ -860,8 +955,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
&phwi_context->be_eq[i]);
if (ret) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
- "Failed to register beiscsi_msix_mcc\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_init_irqs-"
+ "Failed to register beiscsi_msix_mcc\n");
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@@ -870,8 +966,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
"beiscsi", phba);
if (ret) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
- "Failed to register irq\\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_init_irqs-"
+ "Failed to register irq\\n");
return ret;
}
}
@@ -922,7 +1019,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
case ISCSI_OP_REJECT:
WARN_ON(!pbuffer);
WARN_ON(!(buf_len == 48));
- SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : In ISCSI_OP_REJECT\n");
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
@@ -932,11 +1031,12 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
login_hdr->itt = io_task->libiscsi_itt;
break;
default:
- shost_printk(KERN_WARNING, phba->shost,
- "Unrecognized opcode 0x%x in async msg\n",
- (ppdu->
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Unrecognized opcode 0x%x in async msg\n",
+ (ppdu->
dw[offsetof(struct amap_pdu_base, opcode) / 32]
- & PDUBASE_OPCODE_MASK));
+ & PDUBASE_OPCODE_MASK));
return 1;
}
@@ -951,9 +1051,11 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
struct sgl_handle *psgl_handle;
if (phba->io_sgl_hndl_avbl) {
- SE_DEBUG(DBG_LVL_8,
- "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
- phba->io_sgl_alloc_index);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : In alloc_io_sgl_handle,"
+ " io_sgl_alloc_index=%d\n",
+ phba->io_sgl_alloc_index);
+
psgl_handle = phba->io_sgl_hndl_base[phba->
io_sgl_alloc_index];
phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
@@ -971,17 +1073,20 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
- phba->io_sgl_free_index);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : In free_,io_sgl_free_index=%d\n",
+ phba->io_sgl_free_index);
+
if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
/*
* this can happen if clean_task is called on a task that
* failed in xmit_task or alloc_pdu.
*/
- SE_DEBUG(DBG_LVL_8,
- "Double Free in IO SGL io_sgl_free_index=%d,"
- "value there=%p\n", phba->io_sgl_free_index,
- phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
+ "value there=%p\n", phba->io_sgl_free_index,
+ phba->io_sgl_hndl_base
+ [phba->io_sgl_free_index]);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1043,11 +1148,12 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
else
pwrb_context->free_index++;
- SE_DEBUG(DBG_LVL_8,
- "FREE WRB: pwrb_handle=%p free_index=0x%x"
- "wrb_handles_available=%d\n",
- pwrb_handle, pwrb_context->free_index,
- pwrb_context->wrb_handles_available);
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
+ "wrb_handles_available=%d\n",
+ pwrb_handle, pwrb_context->free_index,
+ pwrb_context->wrb_handles_available);
}
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
@@ -1057,8 +1163,11 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
if (phba->eh_sgl_hndl_avbl) {
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
- SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
- phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
+ phba->eh_sgl_alloc_index,
+ phba->eh_sgl_alloc_index);
+
phba->eh_sgl_hndl_avbl--;
if (phba->eh_sgl_alloc_index ==
(phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
@@ -1075,16 +1184,20 @@ void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
- phba->eh_sgl_free_index);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BM_%d : In free_mgmt_sgl_handle,"
+ "eh_sgl_free_index=%d\n",
+ phba->eh_sgl_free_index);
+
if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
/*
* this can happen if clean_task is called on a task that
* failed in xmit_task or alloc_pdu.
*/
- SE_DEBUG(DBG_LVL_8,
- "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
- phba->eh_sgl_free_index);
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BM_%d : Double Free in eh SGL ,"
+ "eh_sgl_free_index=%d\n",
+ phba->eh_sgl_free_index);
return;
}
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1326,9 +1439,10 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
case HWH_TYPE_LOGIN:
- SE_DEBUG(DBG_LVL_1,
- "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
- "- Solicited path\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
+ " hwi_complete_cmd- Solicited path\n");
break;
case HWH_TYPE_NOP:
@@ -1336,13 +1450,14 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
default:
- shost_printk(KERN_WARNING, phba->shost,
- "In hwi_complete_cmd, unknown type = %d"
- "wrb_index 0x%x CID 0x%x\n", type,
- ((psol->dw[offsetof(struct amap_iscsi_wrb,
- type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
- ((psol->dw[offsetof(struct amap_sol_cqe,
- cid) / 32] & SOL_CID_MASK) >> 6));
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : In hwi_complete_cmd, unknown type = %d"
+ "wrb_index 0x%x CID 0x%x\n", type,
+ ((psol->dw[offsetof(struct amap_iscsi_wrb,
+ type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
+ ((psol->dw[offsetof(struct amap_sol_cqe,
+ cid) / 32] & SOL_CID_MASK) >> 6));
break;
}
@@ -1397,10 +1512,11 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
break;
default:
pbusy_list = NULL;
- shost_printk(KERN_WARNING, phba->shost,
- "Unexpected code=%d\n",
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- code) / 32] & PDUCQE_CODE_MASK);
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Unexpected code=%d\n",
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ code) / 32] & PDUCQE_CODE_MASK);
return NULL;
}
@@ -1425,8 +1541,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
}
static unsigned int
-hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
- unsigned int is_header, unsigned int cq_index)
+hwi_update_async_writables(struct beiscsi_hba *phba,
+ struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int is_header, unsigned int cq_index)
{
struct list_head *pbusy_list;
struct async_pdu_handle *pasync_handle;
@@ -1463,9 +1580,10 @@ hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
}
if (!writables) {
- SE_DEBUG(DBG_LVL_1,
- "Duplicate notification received - index 0x%x!!\n",
- cq_index);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : Duplicate notification received - index 0x%x!!\n",
+ cq_index);
WARN_ON(1);
}
@@ -1616,8 +1734,8 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
pdpdu_cqe, &cq_index);
BUG_ON(pasync_handle->is_header != 0);
if (pasync_handle->consumed == 0)
- hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
- cq_index);
+ hwi_update_async_writables(phba, pasync_ctx,
+ pasync_handle->is_header, cq_index);
hwi_free_async_msg(phba, pasync_handle->cri);
hwi_post_async_buffers(phba, pasync_handle->is_header);
@@ -1745,8 +1863,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
pdpdu_cqe, &cq_index);
if (pasync_handle->consumed == 0)
- hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
- cq_index);
+ hwi_update_async_writables(phba, pasync_ctx,
+ pasync_handle->is_header, cq_index);
+
hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
hwi_post_async_buffers(phba, pasync_handle->is_header);
}
@@ -1774,9 +1893,10 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
beiscsi_async_link_state_process(phba,
(struct be_async_event_link_state *) mcc_compl);
else
- SE_DEBUG(DBG_LVL_1,
- " Unsupported Async Event, flags"
- " = 0x%08x\n", mcc_compl->flags);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
+ "BM_%d : Unsupported Async Event, flags"
+ " = 0x%08x\n",
+ mcc_compl->flags);
} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
atomic_dec(&phba->ctrl.mcc_obj.q.used);
@@ -1801,6 +1921,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
struct dmsg_cqe *dmsg;
unsigned int num_processed = 0;
unsigned int tot_nump = 0;
+ unsigned short code = 0, cid = 0;
struct beiscsi_conn *beiscsi_conn;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
@@ -1814,10 +1935,11 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
CQE_VALID_MASK) {
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
- ep = phba->ep_array[(u32) ((sol->
- dw[offsetof(struct amap_sol_cqe, cid) / 32] &
- SOL_CID_MASK) >> 6) -
- phba->fw_config.iscsi_cid_start];
+ cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
+ CQE_CID_MASK) >> 6);
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
+ CQE_CODE_MASK);
+ ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
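
The hunk below benefits from the two locals introduced above: cid and code are
extracted once instead of being re-derived with the same offsetof() arithmetic
in every case label. The dw[offsetof(...)/32] idiom relies on the driver's
"amap" convention, where hardware structures are declared with one u8 per
*bit*, so offsetof() yields a bit offset and dividing by 32 selects the 32-bit
word holding the field. A compilable toy (layout and masks invented for the
example):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct amap_toy_cqe {		/* one u8 per bit, amap-style */
	uint8_t code[6];	/* bits 0..5  */
	uint8_t cid[10];	/* bits 6..15 */
};

static uint32_t toy_field(const uint32_t *dw, size_t bit_off,
			  uint32_t mask, unsigned int shift)
{
	/* bit_off/32 picks the dword; mask+shift isolate the field */
	return (dw[bit_off / 32] & mask) >> shift;
}

int main(void)
{
	uint32_t dw[1] = { (77u << 6) | 13u };	/* cid 77, code 13 */

	printf("cid=%u code=%u\n",
	       toy_field(dw, offsetof(struct amap_toy_cqe, cid), 0xFFC0u, 6),
	       toy_field(dw, offsetof(struct amap_toy_cqe, code), 0x3Fu, 0));
	return 0;	/* prints: cid=77 code=13 */
}
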
@@ -1829,32 +1951,41 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
num_processed = 0;
}
- switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK) {
+ switch (code) {
case SOL_CMD_COMPLETE:
hwi_complete_cmd(beiscsi_conn, phba, sol);
break;
case DRIVERMSG_NOTIFY:
- SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Received DRIVERMSG_NOTIFY\n");
+
dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
break;
case UNSOL_HDR_NOTIFY:
- SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Received UNSOL_HDR_ NOTIFY\n");
+
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
break;
case UNSOL_DATA_NOTIFY:
- SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : Received UNSOL_DATA_NOTIFY\n");
+
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
break;
case CXN_INVALIDATE_INDEX_NOTIFY:
case CMD_INVALIDATED_NOTIFY:
case CXN_INVALIDATE_NOTIFY:
- SE_DEBUG(DBG_LVL_1,
- "Ignoring CQ Error notification for cmd/cxn"
- "invalidate\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Ignoring CQ Error notification for"
+ " cmd/cxn invalidate\n");
break;
case SOL_CMD_KILLED_DATA_DIGEST_ERR:
case CMD_KILLED_INVALID_STATSN_RCVD:
@@ -1864,17 +1995,16 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CMD_CXN_KILLED_ITT_INVALID:
case CMD_CXN_KILLED_SEQ_OUTOFORDER:
case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
- SE_DEBUG(DBG_LVL_1,
- "CQ Error notification for cmd.. "
- "code %d cid 0x%x\n",
- sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK,
- (sol->dw[offsetof(struct amap_sol_cqe, cid) /
- 32] & SOL_CID_MASK));
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : CQ Error notification for cmd.. "
+ "code %d cid 0x%x\n", code, cid);
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
- SE_DEBUG(DBG_LVL_1,
- "Digest error on def pdu ring, dropping..\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Digest error on def pdu ring,"
+ " dropping..\n");
hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
(struct i_t_dpdu_cqe *) sol);
break;
@@ -1892,33 +2022,31 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_OVER_RUN_RESIDUAL:
case CXN_KILLED_UNDER_RUN_RESIDUAL:
case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
- SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
- "0x%x...\n",
- sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK,
- (sol->dw[offsetof(struct amap_sol_cqe, cid) /
- 32] & CQE_CID_MASK));
- iscsi_conn_failure(beiscsi_conn->conn,
- ISCSI_ERR_CONN_FAILED);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : CQ Error %d, reset CID 0x%x...\n",
+ code, cid);
+ if (beiscsi_conn)
+ iscsi_conn_failure(beiscsi_conn->conn,
+ ISCSI_ERR_CONN_FAILED);
break;
case CXN_KILLED_RST_SENT:
case CXN_KILLED_RST_RCVD:
- SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
- "received/sent on CID 0x%x...\n",
- sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK,
- (sol->dw[offsetof(struct amap_sol_cqe, cid) /
- 32] & CQE_CID_MASK));
- iscsi_conn_failure(beiscsi_conn->conn,
- ISCSI_ERR_CONN_FAILED);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : CQ Error %d, reset"
+ "received/sent on CID 0x%x...\n",
+ code, cid);
+ if (beiscsi_conn)
+ iscsi_conn_failure(beiscsi_conn->conn,
+ ISCSI_ERR_CONN_FAILED);
break;
default:
- SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
- "received on CID 0x%x...\n",
- sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK,
- (sol->dw[offsetof(struct amap_sol_cqe, cid) /
- 32] & CQE_CID_MASK));
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : CQ Error Invalid code= %d "
+ "received on CID 0x%x...\n",
+ code, cid);
break;
}
@@ -1977,7 +2105,10 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
if (ret < budget) {
phba = pbe_eq->phba;
blk_iopoll_complete(iop);
- SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : rearm pbe_eq->q.id =%d\n",
+ pbe_eq->q.id);
hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
return ret;
@@ -2348,16 +2479,16 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
kzalloc(sizeof(struct wrb_handle *) *
phba->params.wrbs_per_cxn, GFP_KERNEL);
if (!pwrb_context->pwrb_handle_base) {
- shost_printk(KERN_ERR, phba->shost,
- "Mem Alloc Failed. Failing to load\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
goto init_wrb_hndl_failed;
}
pwrb_context->pwrb_handle_basestd =
kzalloc(sizeof(struct wrb_handle *) *
phba->params.wrbs_per_cxn, GFP_KERNEL);
if (!pwrb_context->pwrb_handle_basestd) {
- shost_printk(KERN_ERR, phba->shost,
- "Mem Alloc Failed. Failing to load\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
goto init_wrb_hndl_failed;
}
if (!num_cxn_wrbh) {
@@ -2438,12 +2569,13 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
if (mem_descr->mem_array[0].virtual_address) {
- SE_DEBUG(DBG_LVL_8,
- "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
- "va=%p\n", mem_descr->mem_array[0].virtual_address);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
+ mem_descr->mem_array[0].virtual_address);
} else
- shost_printk(KERN_WARNING, phba->shost,
- "No Virtual address\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address\n");
pasync_ctx->async_header.va_base =
mem_descr->mem_array[0].virtual_address;
@@ -2454,24 +2586,27 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_HEADER_RING;
if (mem_descr->mem_array[0].virtual_address) {
- SE_DEBUG(DBG_LVL_8,
- "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
- "va=%p\n", mem_descr->mem_array[0].virtual_address);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
+ mem_descr->mem_array[0].virtual_address);
} else
- shost_printk(KERN_WARNING, phba->shost,
- "No Virtual address\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address\n");
+
pasync_ctx->async_header.ring_base =
mem_descr->mem_array[0].virtual_address;
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
if (mem_descr->mem_array[0].virtual_address) {
- SE_DEBUG(DBG_LVL_8,
- "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
- "va=%p\n", mem_descr->mem_array[0].virtual_address);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
+ mem_descr->mem_array[0].virtual_address);
} else
- shost_printk(KERN_WARNING, phba->shost,
- "No Virtual address\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address\n");
pasync_ctx->async_header.handle_base =
mem_descr->mem_array[0].virtual_address;
@@ -2482,12 +2617,13 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_DATA_RING;
if (mem_descr->mem_array[0].virtual_address) {
- SE_DEBUG(DBG_LVL_8,
- "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
- "va=%p\n", mem_descr->mem_array[0].virtual_address);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_RING va=%p\n",
+ mem_descr->mem_array[0].virtual_address);
} else
- shost_printk(KERN_WARNING, phba->shost,
- "No Virtual address\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address\n");
pasync_ctx->async_data.ring_base =
mem_descr->mem_array[0].virtual_address;
@@ -2495,8 +2631,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
if (!mem_descr->mem_array[0].virtual_address)
- shost_printk(KERN_WARNING, phba->shost,
- "No Virtual address\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address\n");
pasync_ctx->async_data.handle_base =
mem_descr->mem_array[0].virtual_address;
@@ -2511,12 +2647,14 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_DATA_BUF;
if (mem_descr->mem_array[0].virtual_address) {
- SE_DEBUG(DBG_LVL_8,
- "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
- "va=%p\n", mem_descr->mem_array[0].virtual_address);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
+ mem_descr->mem_array[0].virtual_address);
} else
- shost_printk(KERN_WARNING, phba->shost,
- "No Virtual address\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address\n");
+
idx = 0;
pasync_ctx->async_data.va_base =
mem_descr->mem_array[idx].virtual_address;
@@ -2657,7 +2795,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context)
{
unsigned int i, num_eq_pages;
- int ret, eq_for_mcc;
+ int ret = 0, eq_for_mcc;
struct be_queue_info *eq;
struct be_dma_mem *mem;
void *eq_vaddress;
@@ -2684,8 +2822,8 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
ret = be_fill_queue(eq, phba->params.num_eq_entries,
sizeof(struct be_eq_entry), eq_vaddress);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "be_fill_queue Failed for EQ\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed for EQ\n");
goto create_eq_error;
}
@@ -2693,12 +2831,15 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
phwi_context->cur_eqd);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "beiscsi_cmd_eq_create"
- "Failedfor EQ\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_cmd_eq_create"
+ "Failed for EQ\n");
goto create_eq_error;
}
- SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : eqid = %d\n",
+ phwi_context->be_eq[i].q.id);
}
return 0;
create_eq_error:
@@ -2717,7 +2858,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context)
{
unsigned int i, num_cq_pages;
- int ret;
+ int ret = 0;
struct be_queue_info *cq, *eq;
struct be_dma_mem *mem;
struct be_eq_obj *pbe_eq;
@@ -2742,8 +2883,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "be_fill_queue Failed for ISCSI CQ\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed "
+ "for ISCSI CQ\n");
goto create_cq_error;
}
@@ -2751,14 +2893,14 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
false, 0);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "beiscsi_cmd_eq_create"
- "Failed for ISCSI CQ\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_cmd_eq_create"
+ "Failed for ISCSI CQ\n");
goto create_cq_error;
}
- SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
- cq->id, eq->id);
- SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi cq_id is %d for eq_id %d\n"
+ "iSCSI CQ CREATED\n", cq->id, eq->id);
}
return 0;
@@ -2799,8 +2941,8 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
sizeof(struct phys_addr),
sizeof(struct phys_addr), dq_vaddress);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "be_fill_queue Failed for DEF PDU HDR\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
return ret;
}
mem->dma = (unsigned long)mem_descr->mem_array[idx].
@@ -2809,13 +2951,15 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
def_pdu_ring_sz,
phba->params.defpdu_hdr_sz);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
return ret;
}
phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
- SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
- phwi_context->be_def_hdrq.id);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi def pdu id is %d\n",
+ phwi_context->be_def_hdrq.id);
+
hwi_post_async_buffers(phba, 1);
return 0;
}
@@ -2844,8 +2988,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
sizeof(struct phys_addr),
sizeof(struct phys_addr), dq_vaddress);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "be_fill_queue Failed for DEF PDU DATA\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
return ret;
}
mem->dma = (unsigned long)mem_descr->mem_array[idx].
@@ -2854,16 +2998,20 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
def_pdu_ring_sz,
phba->params.defpdu_data_sz);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "be_cmd_create_default_pdu_queue Failed"
- " for DEF PDU DATA\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d be_cmd_create_default_pdu_queue"
+ " Failed for DEF PDU DATA\n");
return ret;
}
phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
- SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
- phwi_context->be_def_dataq.id);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi def data id is %d\n",
+ phwi_context->be_def_dataq.id);
+
hwi_post_async_buffers(phba, 0);
- SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : DEFAULT PDU DATA RING CREATED\n");
+
return 0;
}
@@ -2889,13 +3037,14 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
(pm_arr->size / PAGE_SIZE));
page_offset += pm_arr->size / PAGE_SIZE;
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "post sgl failed.\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : post sgl failed.\n");
return status;
}
pm_arr++;
}
- SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : POSTED PAGES\n");
return 0;
}
@@ -2945,8 +3094,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
GFP_KERNEL);
if (!pwrb_arr) {
- shost_printk(KERN_ERR, phba->shost,
- "Memory alloc failed in create wrb ring.\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Memory alloc failed in create wrb ring.\n");
return -ENOMEM;
}
wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
@@ -2990,8 +3139,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
&phwi_context->be_wrbq[i]);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "wrbq create failed.");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : wrbq create failed.");
kfree(pwrb_arr);
return status;
}
@@ -3127,7 +3276,6 @@ static int find_num_cpus(void)
if (num_cpus >= MAX_CPUS)
num_cpus = MAX_CPUS - 1;
- SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
return num_cpus;
}
@@ -3150,7 +3298,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
status = beiscsi_create_eqs(phba, phwi_context);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EQ not created\n");
goto error;
}
@@ -3160,51 +3309,55 @@ static int hwi_init_port(struct beiscsi_hba *phba)
status = mgmt_check_supported_fw(ctrl, phba);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "Unsupported fw version\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Unsupported fw version\n");
goto error;
}
status = beiscsi_create_cqs(phba, phwi_context);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : CQ not created\n");
goto error;
}
status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
def_pdu_ring_sz);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "Default Header not created\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Header not created\n");
goto error;
}
status = beiscsi_create_def_data(phba, phwi_context,
phwi_ctrlr, def_pdu_ring_sz);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "Default Data not created\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Data not created\n");
goto error;
}
status = beiscsi_post_pages(phba);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Post SGL Pages Failed\n");
goto error;
}
status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
if (status != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "WRB Rings not created\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : WRB Rings not created\n");
goto error;
}
- SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_port success\n");
return 0;
error:
- shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_port failed");
hwi_cleanup(phba);
return status;
}
@@ -3217,12 +3370,13 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
- SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
- phwi_ctrlr->phwi_ctxt);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
+ phwi_ctrlr->phwi_ctxt);
} else {
- shost_printk(KERN_ERR, phba->shost,
- "HWI_MEM_ADDN_CONTEXT is more than one element."
- "Failing to load\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
+ "than one element.Failing to load\n");
return -ENOMEM;
}
@@ -3232,8 +3386,9 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
hwi_init_async_pdu_ctx(phba);
if (hwi_init_port(phba) != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "hwi_init_controller failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_controller failed\n");
+
return -ENOMEM;
}
return 0;
@@ -3268,15 +3423,18 @@ static int beiscsi_init_controller(struct beiscsi_hba *phba)
ret = beiscsi_get_memory(phba);
if (ret < 0) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
- "Failed in beiscsi_alloc_memory\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe -"
+ "Failed in beiscsi_alloc_memory\n");
return ret;
}
ret = hwi_init_controller(phba);
if (ret)
goto free_init;
- SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Return success from beiscsi_init_controller");
+
return 0;
free_init:
@@ -3301,8 +3459,8 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
phba->params.ios_per_ctrl,
GFP_KERNEL);
if (!phba->io_sgl_hndl_base) {
- shost_printk(KERN_ERR, phba->shost,
- "Mem Alloc Failed. Failing to load\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
return -ENOMEM;
}
phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
@@ -3311,14 +3469,14 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
GFP_KERNEL);
if (!phba->eh_sgl_hndl_base) {
kfree(phba->io_sgl_hndl_base);
- shost_printk(KERN_ERR, phba->shost,
- "Mem Alloc Failed. Failing to load\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
return -ENOMEM;
}
} else {
- shost_printk(KERN_ERR, phba->shost,
- "HWI_MEM_SGLH is more than one element."
- "Failing to load\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : HWI_MEM_SGLH is more than one element."
+ "Failing to load\n");
return -ENOMEM;
}
@@ -3344,15 +3502,18 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
}
idx++;
}
- SE_DEBUG(DBG_LVL_8,
- "phba->io_sgl_hndl_avbl=%d"
- "phba->eh_sgl_hndl_avbl=%d\n",
- phba->io_sgl_hndl_avbl,
- phba->eh_sgl_hndl_avbl);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phba->io_sgl_hndl_avbl=%d"
+ "phba->eh_sgl_hndl_avbl=%d\n",
+ phba->io_sgl_hndl_avbl,
+ phba->eh_sgl_hndl_avbl);
+
mem_descr_sg = phba->init_mem;
mem_descr_sg += HWI_MEM_SGE;
- SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
- mem_descr_sg->num_elements);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "\n BM_%d : mem_descr_sg->num_elements=%d\n",
+ mem_descr_sg->num_elements);
+
arr_index = 0;
idx = 0;
while (idx < mem_descr_sg->num_elements) {
@@ -3390,17 +3551,17 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
GFP_KERNEL);
if (!phba->cid_array) {
- shost_printk(KERN_ERR, phba->shost,
- "Failed to allocate memory in "
- "hba_setup_cid_tbls\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory in "
+ "hba_setup_cid_tbls\n");
return -ENOMEM;
}
phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
if (!phba->ep_array) {
- shost_printk(KERN_ERR, phba->shost,
- "Failed to allocate memory in "
- "hba_setup_cid_tbls\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory in "
+ "hba_setup_cid_tbls\n");
kfree(phba->cid_array);
return -ENOMEM;
}
@@ -3433,18 +3594,22 @@ static void hwi_enable_intr(struct beiscsi_hba *phba)
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
if (!enabled) {
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
- SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : reg =x%08x addr=%p\n", reg, addr);
iowrite32(reg, addr);
}
if (!phba->msix_enabled) {
eq = &phwi_context->be_eq[0].q;
- SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : eq->id=%d\n", eq->id);
+
hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
} else {
for (i = 0; i <= phba->num_cpus; i++) {
eq = &phwi_context->be_eq[i].q;
- SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : eq->id=%d\n", eq->id);
hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
}
}
@@ -3462,64 +3627,60 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
iowrite32(reg, addr);
} else
- shost_printk(KERN_WARNING, phba->shost,
- "In hwi_disable_intr, Already Disabled\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : In hwi_disable_intr, Already Disabled\n");
}
+/**
+ * beiscsi_get_boot_info() - Get the boot session info
+ * @phba: The device priv structure instance
+ *
+ * Get the boot target info and store it in the driver priv structure
+ *
+ * return values
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
{
- struct be_cmd_get_boot_target_resp *boot_resp;
struct be_cmd_get_session_resp *session_resp;
struct be_mcc_wrb *wrb;
struct be_dma_mem nonemb_cmd;
unsigned int tag, wrb_num;
unsigned short status, extd_status;
+ unsigned int s_handle;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
int ret = -ENOMEM;
- tag = mgmt_get_boot_target(phba);
- if (!tag) {
- SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed\n");
- return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
- return -EBUSY;
- }
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
- boot_resp = embedded_payload(wrb);
-
- if (boot_resp->boot_session_handle < 0) {
- shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
- return -ENXIO;
+ /* Get the session handle of the boot target */
+ ret = be_mgmt_get_boot_shandle(phba, &s_handle);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : No boot session\n");
+ return ret;
}
-
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
sizeof(*session_resp),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
- SE_DEBUG(DBG_LVL_1,
- "Failed to allocate memory for"
- "beiscsi_get_session_info\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : Failed to allocate memory for"
+ "beiscsi_get_session_info\n");
+
return -ENOMEM;
}
memset(nonemb_cmd.va, 0, sizeof(*session_resp));
- tag = mgmt_get_session_info(phba, boot_resp->boot_session_handle,
+ tag = mgmt_get_session_info(phba, s_handle,
&nonemb_cmd);
if (!tag) {
- SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
- " Failed\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : beiscsi_get_session_info"
+ " Failed\n");
+
goto boot_freemem;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -3529,9 +3690,12 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : beiscsi_get_session_info Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+
free_mcc_tag(&phba->ctrl, tag);
goto boot_freemem;
}
@@ -3611,22 +3775,22 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
ret = beiscsi_init_controller(phba);
if (ret < 0) {
- shost_printk(KERN_ERR, phba->shost,
- "beiscsi_dev_probe - Failed in"
- "beiscsi_init_controller\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe - Failed in"
+ "beiscsi_init_controller\n");
return ret;
}
ret = beiscsi_init_sgl_handle(phba);
if (ret < 0) {
- shost_printk(KERN_ERR, phba->shost,
- "beiscsi_dev_probe - Failed in"
- "beiscsi_init_sgl_handle\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe - Failed in"
+ "beiscsi_init_sgl_handle\n");
goto do_cleanup_ctrlr;
}
if (hba_setup_cid_tbls(phba)) {
- shost_printk(KERN_ERR, phba->shost,
- "Failed in hba_setup_cid_tbls\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed in hba_setup_cid_tbls\n");
kfree(phba->io_sgl_hndl_base);
kfree(phba->eh_sgl_hndl_base);
goto do_cleanup_ctrlr;
@@ -3678,8 +3842,8 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
if (mgmt_status)
- shost_printk(KERN_WARNING, phba->shost,
- "mgmt_epfw_cleanup FAILED\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : mgmt_epfw_cleanup FAILED\n");
hwi_purge_eq(phba);
hwi_cleanup(phba);
@@ -3960,7 +4124,9 @@ free_hndls:
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
io_task->bhs_pa.u.a64.address);
io_task->cmd_bhs = NULL;
- SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of SGL_ICD Failed\n");
return -ENOMEM;
}
@@ -3981,15 +4147,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
io_task->bhs_len = sizeof(struct be_cmd_bhs);
if (writedir) {
- memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
- AMAP_SET_BITS(struct amap_pdu_data_out, itt,
- &io_task->cmd_bhs->iscsi_data_pdu,
- (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
- AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
- &io_task->cmd_bhs->iscsi_data_pdu,
- ISCSI_OPCODE_SCSI_DATA_OUT);
- AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
- &io_task->cmd_bhs->iscsi_data_pdu, 1);
AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
INI_WR_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
@@ -3998,9 +4155,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
INI_RD_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
}
- memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
- dw[offsetof(struct amap_pdu_data_out, lun) / 32],
- &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
cpu_to_be16(*(unsigned short *)
@@ -4090,8 +4244,10 @@ static int beiscsi_mtask(struct iscsi_task *task)
break;
default:
- SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
- task->hdr->opcode & ISCSI_OPCODE_MASK);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : opcode =%d Not supported\n",
+ task->hdr->opcode & ISCSI_OPCODE_MASK);
+
return -EINVAL;
}
@@ -4123,17 +4279,22 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
io_task->scsi_cmnd = sc;
num_sg = scsi_dma_map(sc);
if (num_sg < 0) {
- SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_hba *phba = NULL;
+
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
+ "BM_%d : scsi_dma_map Failed\n");
+
return num_sg;
}
xferlen = scsi_bufflen(sc);
sg = scsi_sglist(sc);
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ if (sc->sc_data_direction == DMA_TO_DEVICE)
writedir = 1;
- SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
- task->imm_count);
- } else
+ else
writedir = 0;
+
return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
}
@@ -4162,14 +4323,17 @@ static int beiscsi_bsg_request(struct bsg_job *job)
job->request_payload.payload_len,
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
- SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for "
- "beiscsi_bsg_request\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : Failed to allocate memory for "
+ "beiscsi_bsg_request\n");
return -EIO;
}
tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
&nonemb_cmd);
if (!tag) {
- SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : be_cmd_get_mac_addr Failed\n");
+
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
@@ -4191,22 +4355,31 @@ static int beiscsi_bsg_request(struct bsg_job *job)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : be_cmd_get_mac_addr Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+
return -EIO;
}
break;
default:
- SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n",
- bsg_req->msgcode);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : Unsupported bsg command: 0x%x\n",
+ bsg_req->msgcode);
break;
}
return rc;
}
+void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
+{
+ /* Set the logging parameter */
+ beiscsi_log_enable_init(phba, beiscsi_log_enable);
+}
+
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
struct hwi_controller *phwi_ctrlr;
@@ -4316,18 +4489,21 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
ret = beiscsi_enable_pci(pcidev);
if (ret < 0) {
- dev_err(&pcidev->dev, "beiscsi_dev_probe-"
- " Failed to enable pci device\n");
+ dev_err(&pcidev->dev,
+ "beiscsi_dev_probe - Failed to enable pci device\n");
return ret;
}
phba = beiscsi_hba_alloc(pcidev);
if (!phba) {
- dev_err(&pcidev->dev, "beiscsi_dev_probe-"
- " Failed in beiscsi_hba_alloc\n");
+ dev_err(&pcidev->dev,
+ "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
goto disable_pci;
}
+ /* Initialize driver configuration parameters */
+ beiscsi_hba_attrs_init(phba);
+
switch (pcidev->device) {
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
@@ -4347,7 +4523,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
else
num_cpus = 1;
phba->num_cpus = num_cpus;
- SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : num_cpus = %d\n",
+ phba->num_cpus);
if (enable_msix) {
beiscsi_msix_enable(phba);
@@ -4356,8 +4534,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
}
ret = be_ctrl_init(phba, pcidev);
if (ret) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
- "Failed in be_ctrl_init\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed in be_ctrl_init\n");
goto hba_free;
}
@@ -4366,19 +4545,19 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
value = readl((void *)real_offset);
if (value & 0x00010000) {
gcrashmode++;
- shost_printk(KERN_ERR, phba->shost,
- "Loading Driver in crashdump mode\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Loading Driver in crashdump mode\n");
ret = beiscsi_cmd_reset_function(phba);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "Reset Failed. Aborting Crashdump\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed. Aborting Crashdump\n");
goto hba_free;
}
ret = be_chk_reset_complete(phba);
if (ret) {
- shost_printk(KERN_ERR, phba->shost,
- "Failed to get out of reset."
- "Aborting Crashdump\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to get out of reset."
+ "Aborting Crashdump\n");
goto hba_free;
}
} else {
@@ -4393,8 +4572,8 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
spin_lock_init(&phba->isr_lock);
ret = mgmt_get_fw_config(&phba->ctrl, phba);
if (ret != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "Error getting fw config\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Error getting fw config\n");
goto free_port;
}
phba->shost->max_id = phba->fw_config.iscsi_cid_count;
@@ -4402,8 +4581,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
- "Failed in beiscsi_init_port\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed in beiscsi_init_port\n");
goto free_port;
}
@@ -4420,8 +4600,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
phba->shost->host_no);
phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
if (!phba->wq) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
- "Failed to allocate work queue\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed to allocate work queue\n");
goto free_twq;
}
@@ -4439,8 +4620,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
}
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
- "Failed to beiscsi_init_irqs\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed to beiscsi_init_irqs\n");
goto free_blkenbld;
}
hwi_enable_intr(phba);
@@ -4450,11 +4632,13 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
* log error but continue, because we may not be using
* iscsi boot.
*/
- shost_printk(KERN_ERR, phba->shost, "Could not set up "
- "iSCSI boot info.\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Could not set up "
+ "iSCSI boot info.\n");
beiscsi_create_def_ifaces(phba);
- SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
free_blkenbld:
@@ -4542,19 +4726,17 @@ static int __init beiscsi_module_init(void)
beiscsi_scsi_transport =
iscsi_register_transport(&beiscsi_iscsi_transport);
if (!beiscsi_scsi_transport) {
- SE_DEBUG(DBG_LVL_1,
- "beiscsi_module_init - Unable to register beiscsi"
- "transport.\n");
+ printk(KERN_ERR
+ "beiscsi_module_init - Unable to register beiscsi transport.\n");
return -ENOMEM;
}
- SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
- &beiscsi_iscsi_transport);
+ printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
+ &beiscsi_iscsi_transport);
ret = pci_register_driver(&beiscsi_pci_driver);
if (ret) {
- SE_DEBUG(DBG_LVL_1,
- "beiscsi_module_init - Unable to register"
- "beiscsi pci driver.\n");
+ printk(KERN_ERR
+ "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
goto unregister_iscsi_transport;
}
return 0;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 40fea6ec879c..b8912263ef4e 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -24,6 +24,8 @@
#include <linux/pci.h>
#include <linux/if_ether.h>
#include <linux/in.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -34,7 +36,7 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "4.2.162.0"
+#define BUILD_STR "4.4.58.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -84,23 +86,7 @@
#define MAX_CMD_SZ 65536
#define IIOC_SCSI_DATA 0x05 /* Write Operation */
-#define DBG_LVL 0x00000001
-#define DBG_LVL_1 0x00000001
-#define DBG_LVL_2 0x00000002
-#define DBG_LVL_3 0x00000004
-#define DBG_LVL_4 0x00000008
-#define DBG_LVL_5 0x00000010
-#define DBG_LVL_6 0x00000020
-#define DBG_LVL_7 0x00000040
-#define DBG_LVL_8 0x00000080
-
-#define SE_DEBUG(debug_mask, fmt, args...) \
-do { \
- if (debug_mask & DBG_LVL) { \
- printk(KERN_ERR "(%s():%d):", __func__, __LINE__);\
- printk(fmt, ##args); \
- } \
-} while (0);
+#define INVALID_SESS_HANDLE 0xFFFFFFFF
#define BE_ADAPTER_UP 0x00000000
#define BE_ADAPTER_LINK_DOWN 0x00000001
@@ -351,6 +337,8 @@ struct beiscsi_hba {
struct mgmt_session_info boot_sess;
struct invalidate_command_table inv_tbl[128];
+ unsigned int attr_log_enable;
+
};
struct beiscsi_session {
@@ -860,4 +848,20 @@ struct hwi_context_memory {
struct hwi_async_pdu_context *pasync_ctx;
};
+/* Logging related definitions */
+#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */
+#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */
+#define BEISCSI_LOG_MISC 0x0004 /* Miscellaneous Events */
+#define BEISCSI_LOG_EH 0x0008 /* Error Handler */
+#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */
+#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
+
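+/*
+ * beiscsi_log() (defined below) emits a message when the caller's
+ * subsystem mask is enabled in attr_log_enable, or unconditionally for
+ * severities of KERN_ERR and above (level[1] <= '3'). The "BM_%d"/"BG_%d"
+ * prefix in callers is filled in with __LINE__ by the macro. Illustrative
+ * use, matching the call sites in this patch:
+ *
+ *	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ *		    "BM_%d : num_cpus = %d\n", phba->num_cpus);
+ */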
+#define beiscsi_log(phba, level, mask, fmt, arg...) \
+do { \
+ uint32_t log_value = phba->attr_log_enable; \
+ if (((mask) & log_value) || (level[1] <= '3')) \
+ shost_printk(level, phba->shost, \
+ fmt, __LINE__, ##arg); \
+} while (0)
+
#endif
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 2a096795b9aa..aab5dd359e2c 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -23,6 +23,53 @@
#include "be_mgmt.h"
#include "be_iscsi.h"
+/**
+ * mgmt_reopen_session() - Reopen a session based on reopen_type
+ * @phba: Device priv structure instance
+ * @reopen_type: Type of reopen_session FW should do.
+ * @sess_handle: Session Handle of the session to be re-opened
+ *
+ * return
+ * the TAG used for MBOX Command
+ *
+ **/
+unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
+ unsigned int reopen_type,
+ unsigned int sess_handle)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_reopen_session_req *req;
+ unsigned int tag = 0;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : In bescsi_get_boot_target\n");
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
+ sizeof(struct be_cmd_reopen_session_resp));
+
+ /* Set the reopen_type and sess_handle */
+ req->reopen_type = reopen_type;
+ req->session_handle = sess_handle;
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -30,7 +77,10 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
struct be_cmd_get_boot_target_req *req;
unsigned int tag = 0;
- SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : In bescsi_get_boot_target\n");
+
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
@@ -62,7 +112,10 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
struct be_cmd_get_session_resp *resp;
struct be_sge *sge;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : In beiscsi_get_session_info\n");
+
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
@@ -121,16 +174,16 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
phba->fw_config.iscsi_cid_count =
pfw_cfg->ulp[0].sq_count;
if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
- SE_DEBUG(DBG_LVL_8,
- "FW reported MAX CXNS as %d\t"
- "Max Supported = %d.\n",
- phba->fw_config.iscsi_cid_count,
- BE2_MAX_SESSIONS);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : FW reported MAX CXNS as %d\t"
+ "Max Supported = %d.\n",
+ phba->fw_config.iscsi_cid_count,
+ BE2_MAX_SESSIONS);
phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
}
} else {
- shost_printk(KERN_WARNING, phba->shost,
- "Failed in mgmt_get_fw_config\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in mgmt_get_fw_config\n");
}
spin_unlock(&ctrl->mbox_lock);
@@ -150,9 +203,9 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
sizeof(struct be_mgmt_controller_attributes),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
- SE_DEBUG(DBG_LVL_1,
- "Failed to allocate memory for mgmt_check_supported_fw"
- "\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed to allocate memory for "
+ "mgmt_check_supported_fw\n");
return -ENOMEM;
}
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
@@ -169,18 +222,23 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
status = be_mbox_notify(ctrl);
if (!status) {
struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
- SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
- resp->params.hba_attribs.flashrom_version_string);
- SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
- resp->params.hba_attribs.firmware_version_string);
- SE_DEBUG(DBG_LVL_8,
- "Developer Build, not performing version check...\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Firmware Version of CMD : %s\n"
+ "Firmware Version is : %s\n"
+ "Developer Build, not performing version check...\n",
+ resp->params.hba_attribs.flashrom_version_string,
+ resp->params.hba_attribs.firmware_version_string);
+
phba->fw_config.iscsi_features =
resp->params.hba_attribs.iscsi_features;
- SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n",
- phba->fw_config.iscsi_features);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phba->fw_config.iscsi_features = %d\n",
+ phba->fw_config.iscsi_features);
} else
- SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in mgmt_check_supported_fw\n");
spin_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
@@ -229,9 +287,10 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
OPCODE_COMMON_READ_FLASH, sizeof(*req));
break;
default:
- shost_printk(KERN_WARNING, phba->shost,
- "Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.
- h_vendor.vendor_cmd[0]);
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Unsupported cmd = 0x%x\n\n",
+ bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
+
spin_unlock(&ctrl->mbox_lock);
return -ENOSYS;
}
@@ -275,8 +334,8 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
status = be_mcc_notify_wait(phba);
if (status)
- shost_printk(KERN_WARNING, phba->shost,
- " mgmt_epfw_cleanup , FAILED\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BG_%d : mgmt_epfw_cleanup , FAILED\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -459,8 +518,9 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
beiscsi_ep->ip_type = BE2_IPV6;
} else{
- shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
- dst_addr->sa_family);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BG_%d : unknown addr family %d\n",
+ dst_addr->sa_family);
spin_unlock(&ctrl->mbox_lock);
free_mcc_tag(&phba->ctrl, tag);
return -EINVAL;
@@ -471,7 +531,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
if (phba->nxt_cqid == phba->num_cpus)
phba->nxt_cqid = 0;
req->cq_id = phwi_context->be_cq[i].id;
- SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d\n", i, req->cq_id);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BG_%d : i=%d cq_id=%d\n", i, req->cq_id);
req->defq_id = def_hdr_id;
req->hdr_ring_id = def_hdr_id;
req->data_ring_id = def_data_id;
@@ -506,8 +567,8 @@ unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
if (!status)
phba->interface_handle = pbe_allid->if_hndl_list[0];
else {
- shost_printk(KERN_WARNING, phba->shost,
- "Failed in mgmt_get_all_if_id\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed in mgmt_get_all_if_id\n");
}
spin_unlock(&ctrl->mbox_lock);
@@ -550,9 +611,10 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1,
- "mgmt_exec_nonemb_cmd Failed status = %d"
- "extd_status = %d\n", status, extd_status);
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : mgmt_exec_nonemb_cmd Failed status = %d"
+ "extd_status = %d\n", status, extd_status);
rc = -EIO;
goto free_tag;
}
@@ -573,7 +635,8 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
{
cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
- SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to allocate memory for if info\n");
return -ENOMEM;
}
memset(cmd->va, 0, size);
@@ -629,8 +692,8 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
if (rc < 0)
- shost_printk(KERN_WARNING, phba->shost,
- "Failed to Modify existing IP Address\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Modify existing IP Address\n");
return rc;
}
@@ -684,8 +747,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
if (if_info.dhcp_state) {
- shost_printk(KERN_WARNING, phba->shost,
- "DHCP Already Enabled\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : DHCP Already Enabled\n");
return 0;
}
/* The ip_param->len is 1 in DHCP case. Setting
@@ -712,8 +775,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
if (rc < 0) {
- shost_printk(KERN_WARNING, phba->shost,
- "Failed to Delete existing dhcp\n");
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Delete existing dhcp\n");
return rc;
}
}
@@ -732,8 +796,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
if (rc) {
- shost_printk(KERN_WARNING, phba->shost,
- "Failed to Get Gateway Addr\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Get Gateway Addr\n");
return rc;
}
@@ -743,8 +807,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
IP_ACTION_DEL, IP_V4_LEN);
if (rc) {
- shost_printk(KERN_WARNING, phba->shost,
- "Failed to clear Gateway Addr Set\n");
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to clear Gateway Addr Set\n");
return rc;
}
}
@@ -783,8 +848,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
if (rt_val) {
- shost_printk(KERN_WARNING, phba->shost,
- "Failed to Get Gateway Addr\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Get Gateway Addr\n");
return rt_val;
}
@@ -793,8 +858,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
gateway_param->len);
if (rt_val) {
- shost_printk(KERN_WARNING, phba->shost,
- "Failed to clear Gateway Addr Set\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to clear Gateway Addr Set\n");
return rt_val;
}
}
@@ -804,8 +869,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
gateway_param->len);
if (rt_val)
- shost_printk(KERN_WARNING, phba->shost,
- "Failed to Set Gateway Addr\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Set Gateway Addr\n");
return rt_val;
}
@@ -924,3 +989,150 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
spin_unlock(&ctrl->mbox_lock);
return tag;
}
+
+/**
+ * be_mgmt_get_boot_shandle() - Get the session handle
+ * @phba: device priv structure instance
+ * @s_handle: session handle returned for boot session.
+ *
+ * Get the boot target session handle. In crashdump mode
+ * the driver has to issue an MBX Cmd for the FW to log in
+ * to the boot target.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ *
+ **/
+int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
+ unsigned int *s_handle)
+{
+ struct be_cmd_get_boot_target_resp *boot_resp;
+ struct be_mcc_wrb *wrb;
+ unsigned int tag, wrb_num;
+ uint8_t boot_retry = 3;
+ unsigned short status, extd_status;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ do {
+ /* Get the Boot Target Session Handle and Count */
+ tag = mgmt_get_boot_target(phba);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BG_%d : Getting Boot Target Info Failed\n");
+ return -EAGAIN;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
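+ /* mcc_numtag[tag] packs the completion status: bits 23:16 hold the
+ * WRB index, bits 15:8 the extended status and bits 7:0 the status,
+ * as decoded below.
+ */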
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : mgmt_get_boot_target Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EBUSY;
+ }
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+ boot_resp = embedded_payload(wrb);
+
+ /* Check if there are any boot targets configured */
+ if (!boot_resp->boot_session_count) {
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d ;No boot targets configured\n");
+ return -ENXIO;
+ }
+
+ /* FW returns the session handle of the boot session */
+ if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
+ *s_handle = boot_resp->boot_session_handle;
+ return 0;
+ }
+
+ /* Issue MBX Cmd to FW to login to the boot target */
+ tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
+ INVALID_SESS_HANDLE);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : mgmt_reopen_session Failed\n");
+ return -EAGAIN;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : mgmt_reopen_session Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EBUSY;
+ }
+ free_mcc_tag(&phba->ctrl, tag);
+
+ } while (--boot_retry);
+
+ /* Couldn't log into the boot target */
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : Login to Boot Target Failed\n");
+ return -ENXIO;
+}
+
+/**
+ * mgmt_set_vlan() - Issue and wait for CMD completion
+ * @phba: device private structure instance
+ * @vlan_tag: VLAN tag
+ *
+ * Issue the MBX Cmd and wait for the completion of the
+ * command.
+ *
+ * returns
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int mgmt_set_vlan(struct beiscsi_hba *phba,
+ uint16_t vlan_tag)
+{
+ unsigned int tag, wrb_num;
+ unsigned short status, extd_status;
+
+ tag = be_cmd_set_vlan(phba, vlan_tag);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+ "BG_%d : VLAN Setting Failed\n");
+ return -EBUSY;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+ if (status || extd_status) {
+ beiscsi_log(phba, KERN_ERR,
+ (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+ "BS_%d : status : %d extd_status : %d\n",
+ status, extd_status);
+
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EAGAIN;
+ }
+
+ free_mcc_tag(&phba->ctrl, tag);
+ return 0;
+}
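+
+/*
+ * Illustrative caller (assumed, not part of this patch): an iface
+ * parameter handler would forward the 802.1q tag and propagate failure,
+ * e.g.
+ *
+ *	if (mgmt_set_vlan(phba, vlan_tag))
+ *		return -EIO;
+ */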
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 5c2e37693ca8..c50cef6fec0d 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -108,6 +108,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct bsg_job *job,
struct be_dma_mem *nonemb_cmd);
+#define BEISCSI_NO_RST_ISSUE 0
struct iscsi_invalidate_connection_params_in {
struct be_cmd_req_hdr hdr;
unsigned int session_handle;
@@ -274,6 +275,10 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
+ unsigned int reopen_type,
+ unsigned int sess_handle);
+
unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
u32 boot_session_handle,
struct be_dma_mem *nonemb_cmd);
@@ -290,4 +295,10 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
int mgmt_set_gateway(struct beiscsi_hba *phba,
struct iscsi_iface_param_info *gateway_param);
+int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
+ unsigned int *s_handle);
+
+unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
+
+int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
#endif
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 456e5762977d..b7c326f7a6d0 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -775,7 +775,8 @@ bfa_intx(struct bfa_s *bfa)
if (!intr)
return BFA_TRUE;
- bfa_msix_lpu_err(bfa, intr);
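+ /* Service LPU errors only after bfa_isr_enable() has set intr_enabled */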
+ if (bfa->intr_enabled)
+ bfa_msix_lpu_err(bfa, intr);
return BFA_TRUE;
}
@@ -803,11 +804,17 @@ bfa_isr_enable(struct bfa_s *bfa)
writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
bfa->iocfc.intr_mask = ~umsk;
bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
+
+ /*
+ * Set the flag indicating successful enabling of interrupts
+ */
+ bfa->intr_enabled = BFA_TRUE;
}
void
bfa_isr_disable(struct bfa_s *bfa)
{
+ bfa->intr_enabled = BFA_FALSE;
bfa_isr_mode_set(bfa, BFA_FALSE);
writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
bfa_msix_uninstall(bfa);
@@ -1022,7 +1029,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
u8 *dm_kva = NULL;
u64 dm_pa = 0;
- int i, per_reqq_sz, per_rspq_sz, dbgsz;
+ int i, per_reqq_sz, per_rspq_sz;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
@@ -1083,11 +1090,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
BFA_CACHELINE_SZ);
/* Claim IOCFC kva memory */
- dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
- if (dbgsz > 0) {
- bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
- bfa_mem_kva_curp(iocfc) += dbgsz;
- }
+ bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
+ bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
}
/*
@@ -1429,8 +1433,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
/* kva memory setup for IOCFC */
- bfa_mem_kva_setup(meminfo, iocfc_kva,
- ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
+ bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
}
/*
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 12bfeed268eb..91a8aa394db5 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -168,7 +168,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
/*
* bfa_q_deq - dequeue an element from head of the queue
*/
-#define bfa_q_deq(_q, _qe) { \
+#define bfa_q_deq(_q, _qe) do { \
if (!list_empty(_q)) { \
(*((struct list_head **) (_qe))) = bfa_q_next(_q); \
bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
@@ -177,7 +177,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
} else { \
*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
} \
-}
+} while (0)
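+
+/*
+ * The do-while(0) wrapper makes the macro expand to a single statement,
+ * keeping it safe in un-braced conditionals, e.g.:
+ *	if (cond)
+ *		bfa_q_deq(q, &qe);
+ *	else
+ *		...
+ */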
/*
* bfa_q_deq_tail - dequeue an element from tail of the queue
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 3bbc583f65cf..06f0a163ca35 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -93,6 +93,7 @@ struct bfa_lport_cfg_s {
wwn_t pwwn; /* port wwn */
wwn_t nwwn; /* node wwn */
struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
+ struct bfa_lport_symname_s node_sym_name; /* Node symbolic name */
enum bfa_lport_role roles; /* FCS port roles */
u32 rsvd;
bfa_boolean_t preboot_vp; /* vport created from PBC */
@@ -192,6 +193,18 @@ struct bfa_lport_stats_s {
u32 ns_gidft_unknown_rsp;
u32 ns_gidft_alloc_wait;
+ u32 ns_rnnid_sent;
+ u32 ns_rnnid_accepts;
+ u32 ns_rnnid_rsp_err;
+ u32 ns_rnnid_rejects;
+ u32 ns_rnnid_alloc_wait;
+
+ u32 ns_rsnn_nn_sent;
+ u32 ns_rsnn_nn_accepts;
+ u32 ns_rsnn_nn_rsp_err;
+ u32 ns_rsnn_nn_rejects;
+ u32 ns_rsnn_nn_alloc_wait;
+
/*
* Mgmt Server stats
*/
@@ -410,6 +423,11 @@ struct bfa_rport_remote_link_stats_s {
u32 icc; /* Invalid CRC Count */
};
+struct bfa_rport_qualifier_s {
+ wwn_t pwwn; /* Port WWN */
+ u32 pid; /* port ID */
+ u32 rsvd;
+};
#define BFA_MAX_IO_INDEX 7
#define BFA_NO_IO_INDEX 9
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 8d0b88f67a38..e0beb4d7e264 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1279,6 +1279,7 @@ enum {
GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
GS_RFT_ID = 0x0217, /* Register fc4type on ID */
GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
+ GS_RSNN_NN = 0x0239, /* Register symbolic NN on NN */
GS_RPN_ID = 0x0212, /* Register port name */
GS_RNN_ID = 0x0213, /* Register node name */
GS_RCS_ID = 0x0214, /* Register class of service */
@@ -1357,6 +1358,15 @@ struct fcgs_rspnid_req_s {
};
/*
+ * RSNN_NN
+ */
+struct fcgs_rsnn_nn_req_s {
+ wwn_t node_name; /* Node name */
+ u8 snn_len; /* symbolic node name length */
+ u8 snn[256]; /* symbolic node name */
+};
+
+/*
* RPN_ID
*/
struct fcgs_rpnid_req_s {
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 17b59b8b5644..273cee90b3b4 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1252,6 +1252,27 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
}
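+
+/*
+ * Build an RSNN_NN (Register Symbolic Node Name) CT request for the name
+ * server; the return value is the CT header plus request payload length.
+ */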
u16
+fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ wwn_t node_name, u8 *name)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rsnn_nn_req_s *rsnn_nn =
+ (struct fcgs_rsnn_nn_req_s *) (cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN);
+
+ memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
+
+ rsnn_nn->node_name = node_name;
+ rsnn_nn->snn_len = (u8) strlen((char *)name);
+ strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+
+ return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
{
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index 42cd9d4da697..03c753d1e548 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -166,6 +166,8 @@ enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u8 *name);
+u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id,
+ wwn_t node_name, u8 *name);
u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, enum bfa_lport_role role);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index f0f80e282e39..1633963c66ca 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1466,7 +1466,13 @@ bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
struct bfa_itnim_ioprofile_s *ioprofile)
{
- struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+ struct bfa_fcpim_s *fcpim;
+
+ if (!itnim)
+ return BFA_STATUS_NO_FCPIM_NEXUS;
+
+ fcpim = BFA_FCPIM(itnim->bfa);
+
if (!fcpim->io_profile)
return BFA_STATUS_IOPROFILE_OFF;
@@ -1484,6 +1490,10 @@ void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
int j;
+
+ if (!itnim)
+ return;
+
memset(&itnim->stats, 0, sizeof(itnim->stats));
memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
for (j = 0; j < BFA_IOBUCKET_MAX; j++)
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index eaac57e1ddec..fd3e84d32bd2 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -76,6 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
fcs->bfa = bfa;
fcs->bfad = bfad;
fcs->min_cfg = min_cfg;
+ fcs->num_rport_logins = 0;
bfa->fcs = BFA_TRUE;
fcbuild_init();
@@ -119,6 +120,18 @@ bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
}
/*
+ * Stop FCS operations.
+ */
+void
+bfa_fcs_stop(struct bfa_fcs_s *fcs)
+{
+ bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+ bfa_wc_up(&fcs->wc);
+ bfa_fcs_fabric_modstop(fcs);
+ bfa_wc_wait(&fcs->wc);
+}
+
+/*
* fcs pbc vport initialization
*/
void
@@ -153,6 +166,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
fcs->driver_info = *driver_info;
bfa_fcs_fabric_psymb_init(&fcs->fabric);
+ bfa_fcs_fabric_nsymb_init(&fcs->fabric);
}
/*
@@ -213,6 +227,8 @@ static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_delay(void *cbarg);
static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_delete_comp(void *cbarg);
+static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_stop_comp(void *cbarg);
static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
struct fchs_s *fchs, u16 len);
static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
@@ -250,6 +266,10 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
/*
* Beginning state before fabric creation.
*/
@@ -334,6 +354,11 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
bfa_fcs_fabric_delete(fabric);
break;
+ case BFA_FCS_FABRIC_SM_STOP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+ bfa_fcs_fabric_stop(fabric);
+ break;
+
default:
bfa_sm_fault(fabric->fcs, event);
}
@@ -585,6 +610,11 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
bfa_fcs_fabric_delete(fabric);
break;
+ case BFA_FCS_FABRIC_SM_STOP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping);
+ bfa_fcs_fabric_stop(fabric);
+ break;
+
case BFA_FCS_FABRIC_SM_AUTH_FAILED:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
@@ -682,7 +712,62 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
}
}
+/*
+ * Fabric is being stopped, awaiting vport stop completions.
+ */
+static void
+bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_STOPCOMP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_UP:
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Fabric is being stopped, cleanup without FLOGO
+ */
+static void
+bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_STOPCOMP:
+ case BFA_FCS_FABRIC_SM_LOGOCOMP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+ bfa_wc_down(&(fabric->fcs)->wc);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ /*
+ * Ignore - can get this event if we get notified about IOC down
+ * before the fabric completion callback is done.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
/*
* fcs_fabric_private fabric private functions
@@ -760,6 +845,44 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
}
/*
+ * Node Symbolic Name Creation for base port and all vports
+ */
+void
+bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
+ struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
+
+ bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
+
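+ /* The node symbolic name is assembled below from the adapter model,
+ * driver version and host machine name, each followed by
+ * BFA_FCS_PORT_SYMBNAME_SEPARATOR.
+ */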
+ /* Model name/number */
+ strncpy((char *)&port_cfg->node_sym_name, model,
+ BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
+ strncat((char *)&port_cfg->node_sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* Driver Version */
+ strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
+ BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
+ strncat((char *)&port_cfg->node_sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* Host machine name */
+ strncat((char *)&port_cfg->node_sym_name,
+ (char *)driver_info->host_machine_name,
+ BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
+ strncat((char *)&port_cfg->node_sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* null terminate */
+ port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
+}
+
+/*
* bfa lps login completion callback
*/
void
@@ -919,6 +1042,28 @@ bfa_fcs_fabric_delay(void *cbarg)
}
/*
+ * Stop all vports and wait for vport stop completions.
+ */
+static void
+bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe, *qen;
+
+ bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric);
+
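+ /* Wait-counter pattern: stop_wc is raised once per vport and once for
+ * the base port; bfa_fcs_fabric_stop_comp() runs once every stop
+ * completion has brought the count back down.
+ */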
+ list_for_each_safe(qe, qen, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ bfa_wc_up(&fabric->stop_wc);
+ bfa_fcs_vport_fcs_stop(vport);
+ }
+
+ bfa_wc_up(&fabric->stop_wc);
+ bfa_fcs_lport_stop(&fabric->bport);
+ bfa_wc_wait(&fabric->stop_wc);
+}
+
+/*
* Computes operating BB_SCN value
*/
static u8
@@ -978,6 +1123,14 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
}
+static void
+bfa_fcs_fabric_stop_comp(void *cbarg)
+{
+ struct bfa_fcs_fabric_s *fabric = cbarg;
+
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP);
+}
+
/*
* fcs_fabric_public fabric public functions
*/
@@ -1039,6 +1192,19 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
}
/*
+ * Fabric module stop -- stop FCS actions
+ */
+void
+bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_fabric_s *fabric;
+
+ bfa_trc(fcs, 0);
+ fabric = &fcs->fabric;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP);
+}
+
+/*
* Fabric module start -- kick starts FCS actions
*/
void
@@ -1219,8 +1385,11 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}
}
- bfa_trc(fabric->fcs, els_cmd->els_code);
- bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+
+ if (!bfa_fcs_fabric_is_switched(fabric))
+ bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+
+ bfa_trc(fabric->fcs, fchs->type);
}
/*
@@ -1294,7 +1463,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
u16 reqlen;
struct fchs_s fchs;
- fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE);
/*
* Do not expect this failure -- expect remote node to retry
*/
@@ -1387,6 +1556,13 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
}
}
+void
+bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
+{
+ struct bfa_fcs_fabric_s *fabric = uarg;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP);
+}
+
/*
* Returns FCS vf structure for a given vf_id.
*
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 51c9e1345719..6c4377cb287f 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -62,9 +62,9 @@ struct bfa_fcs_s;
#define N2N_LOCAL_PID 0x010000
#define N2N_REMOTE_PID 0x020000
#define BFA_FCS_RETRY_TIMEOUT 2000
+#define BFA_FCS_MAX_NS_RETRIES 5
#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
-
-
+#define BFA_FCS_MAX_RPORT_LOGINS 1024
struct bfa_fcs_lport_ns_s {
bfa_sm_t sm; /* state machine */
@@ -72,6 +72,8 @@ struct bfa_fcs_lport_ns_s {
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
struct bfa_fcxp_wqe_s fcxp_wqe;
+ u8 num_rnnid_retries;
+ u8 num_rsnn_nn_retries;
};
@@ -205,6 +207,7 @@ struct bfa_fcs_fabric_s {
struct bfa_lps_s *lps; /* lport login services */
u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
/* attached fabric's ip addr */
+ struct bfa_wc_s stop_wc; /* wait counter for stop */
};
#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
@@ -264,6 +267,7 @@ struct bfa_fcs_fabric_s;
#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
+#define bfa_fcs_lport_get_nsym_name(_lport) ((_lport)->port_cfg.node_sym_name)
#define bfa_fcs_lport_is_initiator(_lport) \
((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
#define bfa_fcs_lport_get_nrports(_lport) \
@@ -286,9 +290,8 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
-void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
- wwn_t rport_wwns[], int *nrports);
-
+void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+ struct bfa_rport_qualifier_s rport[], int *nrports);
wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
int index, int nrports, bfa_boolean_t bwwn);
@@ -324,12 +327,17 @@ void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
struct bfa_fcs_lport_s *port, u32 pid);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid(
+ struct bfa_fcs_lport_s *port, u32 pid);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
struct bfa_fcs_lport_s *port, wwn_t pwwn);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
struct bfa_fcs_lport_s *port, wwn_t nwwn);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier(
+ struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid);
void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *rport);
void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
@@ -338,6 +346,8 @@ void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
@@ -382,6 +392,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
@@ -419,6 +430,7 @@ struct bfa_fcs_rport_s {
struct bfa_fcs_s *fcs; /* fcs instance */
struct bfad_rport_s *rp_drv; /* driver peer instance */
u32 pid; /* port ID of rport */
+ u32 old_pid; /* PID before rport goes offline */
u16 maxfrsize; /* maximum frame size */
__be16 reply_oxid; /* OX_ID of inbound requests */
enum fc_cos fc_cos; /* FC classes of service supp */
@@ -459,7 +471,7 @@ struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
struct bfa_fcs_lport_s *port, wwn_t rnwwn);
void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
-
+void bfa_fcs_rport_set_max_logins(u32 max_logins);
void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
struct fchs_s *fchs, u16 len);
void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
@@ -505,12 +517,13 @@ struct bfa_fcs_itnim_s {
struct bfa_fcxp_s *fcxp; /* FCXP in use */
struct bfa_itnim_stats_s stats; /* itn statistics */
};
-#define bfa_fcs_fcxp_alloc(__fcs) \
- bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
-
-#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \
- bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \
- NULL, 0, 0, NULL, NULL, NULL, NULL)
+#define bfa_fcs_fcxp_alloc(__fcs, __req) \
+ bfa_fcxp_req_rsp_alloc(NULL, (__fcs)->bfa, 0, 0, \
+ NULL, NULL, NULL, NULL, __req)
+#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
+ __alloc_cbarg, __req) \
+ bfa_fcxp_req_rsp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
+ __alloc_cbarg, NULL, 0, 0, NULL, NULL, NULL, NULL, __req)
static inline struct bfad_port_s *
bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
@@ -592,7 +605,7 @@ bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
-void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim);
bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
@@ -676,6 +689,7 @@ struct bfa_fcs_s {
struct bfa_fcs_stats_s stats; /* FCS statistics */
struct bfa_wc_s wc; /* waiting counter */
int fcs_aen_seq;
+ u32 num_rport_logins;
};
/*
@@ -702,6 +716,9 @@ enum bfa_fcs_fabric_event {
BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
BFA_FCS_FABRIC_SM_START = 16, /* from driver */
+ BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
+ BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
+ BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
};
/*
@@ -727,6 +744,26 @@ enum rport_event {
RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
+	RPSM_EVENT_FC4_FCS_ONLINE = 19, /* FC-4 FCS online complete */
+};
+
+/*
+ * fcs_itnim_sm FCS itnim state machine events
+ */
+enum bfa_fcs_itnim_event {
+ BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
+ BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
+ BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
+ BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
+ BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
+ BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
+	BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA offline callback */
+	BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA online callback */
+ BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
+ BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
+	BFA_FCS_ITNIM_SM_PRLO = 11, /* PRLO received from rport */
+ BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
+	BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
};
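
As elsewhere in bfa, these events drive a function-pointer state machine: the current state is itself the handler, so dispatch is a single indirect call and unhandled events land in bfa_sm_fault(). A sketch of the convention, assuming the usual bfa_cs.h macro definitions:

/* A state is a handler taking (object, event). */
typedef void (*bfa_fcs_itnim_sm_t)(struct bfa_fcs_itnim_s *itnim,
				   enum bfa_fcs_itnim_event event);

/* bfa_sm_set_state(itnim, state)  ~ itnim->sm = (bfa_sm_t)(state)   */
/* bfa_sm_send_event(itnim, event) ~ itnim->sm(itnim, event)         */
/* bfa_sm_fault(fcs, event)        ~ trace an event the current      */
/*                                   state does not handle           */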
/*
@@ -741,6 +778,7 @@ void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
struct bfa_fcs_driver_info_s *driver_info);
void bfa_fcs_exit(struct bfa_fcs_s *fcs);
+void bfa_fcs_stop(struct bfa_fcs_s *fcs);
/*
* bfa fcs vf public functions
@@ -766,11 +804,13 @@ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
struct fchs_s *fchs, u16 len);
void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
wwn_t fabric_name);
u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 9272840a2409..6dc7926a3edd 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -40,25 +40,6 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
enum bfa_itnim_aen_event event);
-/*
- * fcs_itnim_sm FCS itnim state machine events
- */
-
-enum bfa_fcs_itnim_event {
- BFA_FCS_ITNIM_SM_ONLINE = 1, /* rport online event */
- BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
- BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
- BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
- BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
- BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
- BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
- BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
- BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
- BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
- BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
- BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
-};
-
static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
@@ -69,6 +50,8 @@ static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
@@ -99,7 +82,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
bfa_trc(itnim->fcs, event);
switch (event) {
- case BFA_FCS_ITNIM_SM_ONLINE:
+ case BFA_FCS_ITNIM_SM_FCS_ONLINE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
itnim->prli_retries = 0;
bfa_fcs_itnim_send_prli(itnim, NULL);
@@ -138,6 +121,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
case BFA_FCS_ITNIM_SM_INITIATOR:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -166,12 +150,13 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
switch (event) {
case BFA_FCS_ITNIM_SM_RSP_OK:
- if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) {
+ if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR)
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
- } else {
- bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
- bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
- }
+ else
+ bfa_sm_set_state(itnim,
+ bfa_fcs_itnim_sm_hal_rport_online);
+
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -194,6 +179,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
case BFA_FCS_ITNIM_SM_INITIATOR:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
bfa_fcxp_discard(itnim->fcxp);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_DELETE:
@@ -208,6 +194,44 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
}
static void
+bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_HAL_ONLINE:
+ if (!itnim->bfa_itnim)
+ itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa,
+ itnim->rport->bfa_rport, itnim);
+
+ if (itnim->bfa_itnim) {
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
+ bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
+ } else {
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE);
+ }
+
+ break;
+
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
{
@@ -238,6 +262,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
case BFA_FCS_ITNIM_SM_INITIATOR:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
bfa_timer_stop(&itnim->timer);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_DELETE:
@@ -275,9 +300,8 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
- bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
bfa_itnim_offline(itnim->bfa_itnim);
- bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
break;
case BFA_FCS_ITNIM_SM_DELETE:
@@ -372,8 +396,14 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
break;
+ /*
+ * fcs_online is expected here for well known initiator ports
+ */
+ case BFA_FCS_ITNIM_SM_FCS_ONLINE:
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+ break;
+
case BFA_FCS_ITNIM_SM_RSP_ERROR:
- case BFA_FCS_ITNIM_SM_ONLINE:
case BFA_FCS_ITNIM_SM_INITIATOR:
break;
@@ -426,11 +456,12 @@ bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(itnim->fcs, itnim->rport->pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
itnim->stats.fcxp_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
- bfa_fcs_itnim_send_prli, itnim);
+ bfa_fcs_itnim_send_prli, itnim, BFA_TRUE);
return;
}
itnim->fcxp = fcxp;
@@ -483,7 +514,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
if (prli_resp->parampage.servparams.initiator) {
bfa_trc(itnim->fcs, prli_resp->parampage.type);
itnim->rport->scsi_function =
- BFA_RPORT_INITIATOR;
+ BFA_RPORT_INITIATOR;
itnim->stats.prli_rsp_acc++;
itnim->stats.initiator++;
bfa_sm_send_event(itnim,
@@ -531,7 +562,11 @@ bfa_fcs_itnim_timeout(void *arg)
static void
bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
{
- bfa_itnim_delete(itnim->bfa_itnim);
+ if (itnim->bfa_itnim) {
+ bfa_itnim_delete(itnim->bfa_itnim);
+ itnim->bfa_itnim = NULL;
+ }
+
bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
}
@@ -552,7 +587,6 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
struct bfa_fcs_lport_s *port = rport->port;
struct bfa_fcs_itnim_s *itnim;
struct bfad_itnim_s *itnim_drv;
- struct bfa_itnim_s *bfa_itnim;
/*
* call bfad to allocate the itnim
@@ -570,20 +604,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
itnim->fcs = rport->fcs;
itnim->itnim_drv = itnim_drv;
- /*
- * call BFA to create the itnim
- */
- bfa_itnim =
- bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
-
- if (bfa_itnim == NULL) {
- bfa_trc(port->fcs, rport->pwwn);
- bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
- WARN_ON(1);
- return NULL;
- }
-
- itnim->bfa_itnim = bfa_itnim;
+ itnim->bfa_itnim = NULL;
itnim->seq_rec = BFA_FALSE;
itnim->rec_support = BFA_FALSE;
itnim->conf_comp = BFA_FALSE;
@@ -613,20 +634,12 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
* Notification from rport that PLOGI is complete to initiate FC-4 session.
*/
void
-bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
+bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim)
{
itnim->stats.onlines++;
- if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) {
- bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
- } else {
- /*
- * For well known addresses, we set the itnim to initiator
- * state
- */
- itnim->stats.initiator++;
- bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
- }
+ if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid))
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE);
}
/*
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index bcc4966e8ba4..3b75f6fb2de1 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -131,6 +131,8 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
break;
case BFA_FCS_PORT_SM_OFFLINE:
@@ -166,6 +168,8 @@ bfa_fcs_lport_sm_online(
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
list_for_each_safe(qe, qen, &port->rport_q) {
@@ -222,6 +226,8 @@ bfa_fcs_lport_sm_offline(
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
list_for_each_safe(qe, qen, &port->rport_q) {
@@ -267,6 +273,8 @@ bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
}
break;
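
The added else branches tie the base port into the fabric's stop waiting-counter: a vport reports back through bfa_fcs_vport_stop_comp(), while the base lport drops stop_wc directly, and fabric stop completes once the count reaches zero. A sketch of the intended fabric-side pairing, assuming the standard bfa_wc_* helpers (bfa_fcs_fabric_stop_comp is an assumed callback name; the real call sites are outside these hunks):

	bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric);
	bfa_wc_up(&fabric->stop_wc);	/* one count per lport being stopped */
	bfa_fcs_lport_stop(&fabric->bport);
	bfa_wc_wait(&fabric->stop_wc);	/* fires the callback at count == 0 */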
@@ -340,7 +348,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -370,7 +378,7 @@ bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -507,7 +515,7 @@ bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
bfa_trc(port->fcs, rx_fchs->s_id);
bfa_trc(port->fcs, rx_fchs->d_id);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -552,7 +560,7 @@ bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_len);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -684,7 +692,7 @@ bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -854,6 +862,25 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
}
/*
+ * OLD_PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (rport->old_pid == pid)
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pid);
+ return NULL;
+}
+
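This lookup exists for the RSCN path: when PLOGI retries or NS rediscovery finally give up, the patch now records the last good address in old_pid before zeroing pid, so a later RSCN for that address can still be matched to the existing rport. The consumer, further down in bfa_fcs_lport_scn_portid_rscn(), is simply:

	rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
	if (!rport)
		rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid);
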
+/*
* PWWN based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
@@ -892,6 +919,26 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
}
/*
+ * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port,
+ wwn_t pwwn, u32 pid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid)
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pwwn);
+ return NULL;
+}
+
+/*
* Called by rport module when new rports are discovered.
*/
void
@@ -939,6 +986,16 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
}
/*
+ * Called by fabric for base port and by vport for virtual ports
+ * when target mode driver is unloaded.
+ */
+void
+bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP);
+}
+
+/*
* Called by fabric to delete base lport and associated resources.
*
* Called by vport to delete lport and associated resources. Should call
@@ -1657,10 +1714,11 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
- bfa_fcs_lport_fdmi_send_rhba, fdmi);
+ bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE);
return;
}
fdmi->fcxp = fcxp;
@@ -1931,10 +1989,11 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
- bfa_fcs_lport_fdmi_send_rprt, fdmi);
+ bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE);
return;
}
fdmi->fcxp = fcxp;
@@ -2146,10 +2205,11 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
- bfa_fcs_lport_fdmi_send_rpa, fdmi);
+ bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE);
return;
}
fdmi->fcxp = fcxp;
@@ -2736,10 +2796,11 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
- bfa_fcs_lport_ms_send_gmal, ms);
+ bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE);
return;
}
ms->fcxp = fcxp;
@@ -2936,10 +2997,11 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
- bfa_fcs_lport_ms_send_gfn, ms);
+ bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE);
return;
}
ms->fcxp = fcxp;
@@ -3012,11 +3074,12 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ms_plogi_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
- bfa_fcs_lport_ms_send_plogi, ms);
+ bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE);
return;
}
ms->fcxp = fcxp;
@@ -3166,6 +3229,10 @@ static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_timeout(void *arg);
static void bfa_fcs_lport_ns_plogi_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
@@ -3202,6 +3269,20 @@ static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_rnn_id_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_process_gidft_pids(
struct bfa_fcs_lport_s *port,
u32 *pid_buf, u32 n_pids);
@@ -3226,6 +3307,8 @@ enum vport_ns_event {
NSSM_EVENT_RFTID_SENT = 9,
NSSM_EVENT_RFFID_SENT = 10,
NSSM_EVENT_GIDFT_SENT = 11,
+ NSSM_EVENT_RNNID_SENT = 12,
+ NSSM_EVENT_RSNN_NN_SENT = 13,
};
static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
@@ -3266,6 +3349,21 @@ static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_rnn_id(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_rsnn_nn(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rsnn_nn_retry(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
/*
* Start in offline state - awaiting linkup
*/
@@ -3333,8 +3431,9 @@ bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
break;
case NSSM_EVENT_RSP_OK:
- bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
- bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+ ns->num_rnnid_retries = 0;
+ bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
@@ -3374,6 +3473,176 @@ bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
}
static void
+bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RNNID_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+ ns->num_rnnid_retries = 0;
+ ns->num_rsnn_nn_retries = 0;
+ bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+ break;
+
+ case NSSM_EVENT_RSP_ERROR:
+ if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) {
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry);
+ ns->port->stats.ns_retries++;
+ ns->num_rnnid_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+ BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ bfa_sm_set_state(ns,
+ bfa_fcs_lport_ns_sm_sending_rspn_id);
+ bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ }
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_fcxp_discard(ns->fcxp);
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+ bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSNN_NN_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
+ ns->num_rsnn_nn_retries = 0;
+ bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_RSP_ERROR:
+ if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) {
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry);
+ ns->port->stats.ns_retries++;
+ ns->num_rsnn_nn_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout,
+ ns, BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ bfa_sm_set_state(ns,
+ bfa_fcs_lport_ns_sm_sending_rspn_id);
+ bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ }
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_discard(ns->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+ bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
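Taken together, the six new handlers splice RNN_ID and RSNN_NN registration into the name-server sequence between PLOGI and RSPN_ID. Both steps are best-effort: once num_rnnid_retries or num_rsnn_nn_retries reaches BFA_FCS_MAX_NS_RETRIES, the machine falls through to RSPN_ID instead of stalling the login. The resulting flow:

	PLOGI -ok-> RNN_ID -ok-> RSNN_NN -ok-> RSPN_ID -> RFT_ID -> RFF_ID -> GID_FT
	              |              |
	              +--- retries exhausted ---> RSPN_ID (skip the registration)
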
+static void
bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
@@ -3770,11 +4039,12 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_plogi_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
- bfa_fcs_lport_ns_send_plogi, ns);
+ bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@@ -3853,6 +4123,162 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
/*
+ * Register node name for port_id
+ */
+static void
+bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_rnnid_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE);
+ return;
+ }
+
+ ns->fcxp = fcxp;
+
+ len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port),
+ bfa_fcs_lport_get_fcid(port),
+ bfa_fcs_lport_get_nwwn(port));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_rnn_id_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ port->stats.ns_rnnid_sent++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_rnnid_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ port->stats.ns_rnnid_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ port->stats.ns_rnnid_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register the symbolic node name for a given node name.
+ */
+static void
+bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+ u8 *nsymbl;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_rsnn_nn_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE);
+ return;
+ }
+ ns->fcxp = fcxp;
+
+ nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name(
+ bfa_fcs_get_base_port(port->fcs)));
+
+ len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port),
+ bfa_fcs_lport_get_nwwn(port), nsymbl);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ port->stats.ns_rsnn_nn_sent++;
+
+ bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_rsnn_nn_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ port->stats.ns_rsnn_nn_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ port->stats.ns_rsnn_nn_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
* Register the symbolic port name.
*/
static void
@@ -3870,11 +4296,12 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rspnid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
- bfa_fcs_lport_ns_send_rspn_id, ns);
+ bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@@ -3971,11 +4398,12 @@ bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rftid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
- bfa_fcs_lport_ns_send_rft_id, ns);
+ bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@@ -4044,11 +4472,12 @@ bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rffid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
- bfa_fcs_lport_ns_send_rff_id, ns);
+ bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@@ -4127,11 +4556,12 @@ bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_gidft_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
- bfa_fcs_lport_ns_send_gid_ft, ns);
+ bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@@ -4261,6 +4691,10 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
struct fcgs_gidft_resp_s *gidft_entry;
struct bfa_fcs_rport_s *rport;
u32 ii;
+ struct bfa_fcs_fabric_s *fabric = port->fabric;
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe;
+ u8 found = 0;
for (ii = 0; ii < n_pids; ii++) {
gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
@@ -4269,6 +4703,29 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
continue;
/*
+ * Ignore PID if it is of base port
+ * (Avoid vports discovering base port as remote port)
+ */
+ if (gidft_entry->pid == fabric->bport.pid)
+ continue;
+
+ /*
+ * Ignore PID if it is of vport created on the same base port
+ * (Avoid vport discovering every other vport created on the
+ * same port as remote port)
+ */
+ list_for_each(qe, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ if (vport->lport.pid == gidft_entry->pid)
+ found = 1;
+ }
+
+ if (found) {
+ found = 0;
+ continue;
+ }
+
+ /*
* Check if this rport already exists
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid);
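
The base-port/vport filter above reappears almost verbatim in the RSCN handler later in this file; if it ever grows a third caller, it could be folded into a predicate along these lines (illustrative only, not part of this patch):

static bfa_boolean_t
bfa_fcs_fabric_is_self_pid(struct bfa_fcs_fabric_s *fabric, u32 pid)
{
	struct bfa_fcs_vport_s *vport;
	struct list_head *qe;

	/* PID of the base port itself */
	if (pid == fabric->bport.pid)
		return BFA_TRUE;

	/* PID of any vport created on the same base port */
	list_for_each(qe, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *) qe;
		if (vport->lport.pid == pid)
			return BFA_TRUE;
	}
	return BFA_FALSE;
}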
@@ -4335,7 +4792,8 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
bfa_trc(port->fcs, port->pid);
- bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
+ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online))
+ bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
}
static void
@@ -4355,6 +4813,70 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
}
}
+void
+bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ u8 symbl[256];
+ u8 *psymbl = &symbl[0];
+ int len;
+
+ if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+ return;
+
+ /* Avoid sending RSPN in the following states. */
+ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry))
+ return;
+
+ memset(symbl, 0, sizeof(symbl));
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp) {
+ port->stats.ns_rspnid_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE);
+ return;
+ }
+
+ ns->fcxp = fcxp;
+
+ if (port->vport) {
+ /*
+ * For Vports, we append the vport's port symbolic name
+ * to that of the base port.
+ */
+ strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ (bfa_fcs_get_base_port(port->fcs))),
+ strlen((char *)&bfa_fcs_lport_get_psym_name(
+ bfa_fcs_get_base_port(port->fcs))));
+
+ /* Ensure we have a null terminating string. */
+ ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
+ bfa_fcs_get_base_port(port->fcs)))] = 0;
+
+ strncat((char *)psymbl,
+ (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ }
+
+ len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port), 0, psymbl);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+
+ port->stats.ns_rspnid_sent++;
+}
+
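Unlike the state-machine senders, this utility allocates with BFA_FALSE and passes no response callback to bfa_fcxp_send(): the RSPN_ID here is fire-and-forget, issued outside the NS state machine. The strncpy/strncat sequence above concatenates the base port's symbolic name with the vport's into the fixed 256-byte symbl buffer; a bounded format call would express the same construction more defensively (a sketch, assuming both symbolic names are NUL-terminated):

	snprintf((char *)symbl, sizeof(symbl), "%s%s",
		 (char *)&bfa_fcs_lport_get_psym_name(
				bfa_fcs_get_base_port(port->fcs)),
		 (char *)&bfa_fcs_lport_get_psym_name(port));
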
/*
* FCS SCN
*/
@@ -4529,10 +5051,11 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
bfa_trc(port->fcs, port->port_cfg.pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
- bfa_fcs_lport_scn_send_scr, scn);
+ bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE);
return;
}
scn->fcxp = fcxp;
@@ -4614,7 +5137,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
bfa_trc(port->fcs, rx_fchs->s_id);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -4688,14 +5211,33 @@ static void
bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
{
struct bfa_fcs_rport_s *rport;
+ struct bfa_fcs_fabric_s *fabric = port->fabric;
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe;
bfa_trc(port->fcs, rpid);
/*
+ * Ignore PID if it is of base port or of vports created on the
+ * same base port. It is to avoid vports discovering base port or
+ * other vports created on same base port as remote port
+ */
+ if (rpid == fabric->bport.pid)
+ return;
+
+ list_for_each(qe, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ if (vport->lport.pid == rpid)
+ return;
+ }
+ /*
* If this is an unknown device, then it just came online.
* Otherwise let rport handle the RSCN event.
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
+ if (!rport)
+ rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid);
+
if (rport == NULL) {
/*
* If min cfg mode is enabled, we do not need to
@@ -4888,15 +5430,15 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
}
void
-bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
- wwn_t rport_wwns[], int *nrports)
+bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+ struct bfa_rport_qualifier_s rports[], int *nrports)
{
struct list_head *qh, *qe;
struct bfa_fcs_rport_s *rport = NULL;
int i;
struct bfa_fcs_s *fcs;
- if (port == NULL || rport_wwns == NULL || *nrports == 0)
+ if (port == NULL || rports == NULL || *nrports == 0)
return;
fcs = port->fcs;
@@ -4916,7 +5458,13 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
continue;
}
- rport_wwns[i] = rport->pwwn;
+ if (!rport->pwwn && !rport->pid) {
+ qe = bfa_q_next(qe);
+ continue;
+ }
+
+ rports[i].pwwn = rport->pwwn;
+ rports[i].pid = rport->pid;
i++;
qe = bfa_q_next(qe);
@@ -5760,6 +6308,16 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
{
vport->vport_stats.fab_cleanup++;
}
+
+/*
+ * Stop notification from fabric SM. To be invoked from within FCS.
+ */
+void
+bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
+}
+
/*
* delete notification from fabric SM. To be invoked from within FCS.
*/
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index fe0463a1db04..cc43b2a58ce3 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -30,14 +30,22 @@ static u32
bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
/* In millisecs */
/*
+ * bfa_fcs_rport_max_logins is the max count of bfa_fcs_rports,
+ * whereas DEF_CFG_NUM_RPORTS is the max count of bfa_rports
+ */
+static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS;
+
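This module-static pairs with the bfa_fcs_rport_set_max_logins() hook declared in bfa_fcs.h and the num_rport_logins counter added to struct bfa_fcs_s. A plausible shape for the setter and for the admission check at rport-allocation time (a sketch; the actual definitions live elsewhere in this patch):

void
bfa_fcs_rport_set_max_logins(u32 max_logins)
{
	if (max_logins > 0)
		bfa_fcs_rport_max_logins = max_logins;
}

	/* in bfa_fcs_rport_alloc(): refuse non-WKA logins past the cap */
	if (bfa_fcs_rport_max_logins &&
	    port->fcs->num_rport_logins >= bfa_fcs_rport_max_logins &&
	    !BFA_FCS_PID_IS_WKA(rpid))
		return NULL;
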
+/*
* forward declarations
*/
static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
-static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport);
-static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
struct fc_logi_s *plogi);
static void bfa_fcs_rport_timeout(void *arg);
@@ -76,6 +84,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
enum rport_event event);
@@ -87,6 +96,8 @@ static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
enum rport_event event);
+static void bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
@@ -123,6 +134,10 @@ static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
enum rport_event event);
+static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
static struct bfa_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
@@ -130,6 +145,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
{BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
+ {BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
@@ -167,8 +183,8 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_RCVD:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
- bfa_fcs_rport_send_plogiacc(rport, NULL);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
@@ -252,8 +268,8 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FCXP_SENT:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
- bfa_fcs_rport_hal_online(rport);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_DELETE:
@@ -348,9 +364,9 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_timer_stop(&rport->timer);
- bfa_fcs_rport_hal_online(rport);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@@ -370,9 +386,9 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
switch (event) {
case RPSM_EVENT_ACCEPTED:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
rport->plogi_retries = 0;
- bfa_fcs_rport_hal_online(rport);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
@@ -397,6 +413,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
BFA_FCS_RETRY_TIMEOUT);
} else {
bfa_stats(rport->port, rport_del_max_plogi_retry);
+ rport->old_pid = rport->pid;
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -443,13 +460,77 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_COMP:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_hal_online(rport);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * PLOGI is done. Await bfa_fcs_itnim to ascertain the SCSI function
+ */
+static void
+bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FC4_FCS_ONLINE:
+ if (rport->scsi_function == BFA_RPORT_INITIATOR) {
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_rport_online(rport);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+ break;
+ }
+
+ if (!rport->bfa_rport)
+ rport->bfa_rport =
+ bfa_rport_create(rport->fcs->bfa, rport);
+
+ if (rport->bfa_rport) {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_fcs_rport_hal_online(rport);
+ } else {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ }
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ rport->plogi_pending = BFA_TRUE;
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_SCN:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcs_rport_fcs_offline_action(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
+ break;
}
}
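
The new state inverts the old bring-up order: FC-4 (PRLI) negotiation now completes before any HAL resources are committed, and bfa_rport_create() is attempted only for SCSI targets, with a failed create unwinding through fc4_logosend. The bring-up now reads:

	PLOGI done --FC4_FCS_ONLINE--> fc4_fcs_online
	    initiator/WKA:  ------------------------------------> online
	    target:  bfa_rport_create() ok --> hal_online --HCB_ONLINE--> online
	             bfa_rport_create() fails --> fc4_logosend (tear down)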
@@ -468,41 +549,34 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_HCB_ONLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
- bfa_fcs_rport_online_action(rport);
+ bfa_fcs_rport_hal_online_action(rport);
break;
- case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_PLOGI_COMP:
break;
+ case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_LOGO_RCVD:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcs_rport_fcs_offline_action(rport);
break;
+ case RPSM_EVENT_SCN:
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_RCVD:
rport->plogi_pending = BFA_TRUE;
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
- break;
-
- case RPSM_EVENT_SCN:
- /*
- * @todo
- * Ignore SCN - PLOGI just completed, FC-4 login should detect
- * device failures.
- */
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcs_rport_fcs_offline_action(rport);
break;
default:
@@ -537,18 +611,18 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
@@ -579,7 +653,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@@ -592,24 +666,16 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_IMP:
- rport->pid = 0;
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
- bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_timer_start(rport->fcs->bfa, &rport->timer,
- bfa_fcs_rport_timeout, rport,
- bfa_fcs_rport_del_timeout);
- break;
-
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@@ -642,14 +708,14 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
}
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@@ -659,7 +725,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
@@ -668,7 +734,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_IMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@@ -696,21 +762,21 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@@ -719,7 +785,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_PLOGI_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@@ -756,13 +822,13 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_FAILED:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@@ -774,14 +840,14 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_IMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_offline_action(rport);
+ bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@@ -803,13 +869,19 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FC4_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ bfa_fcs_rport_hal_offline(rport);
break;
case RPSM_EVENT_DELETE:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ if (rport->pid && (rport->prlo == BFA_TRUE))
+ bfa_fcs_rport_send_prlo_acc(rport);
+ if (rport->pid && (rport->prlo == BFA_FALSE))
+ bfa_fcs_rport_send_logo_acc(rport);
+
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
break;
+ case RPSM_EVENT_HCB_ONLINE:
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
@@ -835,7 +907,20 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FC4_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ bfa_fcs_rport_hal_offline(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+		bfa_fcs_rport_send_logo_acc(rport);
+		/* fall through */
+ case RPSM_EVENT_PRLO_RCVD:
+ if (rport->prlo == BFA_TRUE)
+ bfa_fcs_rport_send_prlo_acc(rport);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
+ break;
+
+ case RPSM_EVENT_HCB_ONLINE:
+ case RPSM_EVENT_DELETE:
+ /* Rport is being deleted */
break;
default:
@@ -857,13 +942,23 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FC4_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ bfa_fcs_rport_hal_offline(rport);
break;
- case RPSM_EVENT_SCN:
- case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_LOGO_RCVD:
+ /*
+ * Rport is going offline. Just ack the logo
+ */
+ bfa_fcs_rport_send_logo_acc(rport);
+ break;
+
case RPSM_EVENT_PRLO_RCVD:
+ bfa_fcs_rport_send_prlo_acc(rport);
+ break;
+
+ case RPSM_EVENT_HCB_ONLINE:
+ case RPSM_EVENT_SCN:
+ case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
/*
* rport is already going offline.
@@ -907,24 +1002,23 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
*/
case RPSM_EVENT_ADDRESS_CHANGE:
- if (bfa_fcs_lport_is_online(rport->port)) {
- if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
- bfa_sm_set_state(rport,
- bfa_fcs_rport_sm_nsdisc_sending);
- rport->ns_retries = 0;
- bfa_fcs_rport_send_nsdisc(rport, NULL);
- } else {
- bfa_sm_set_state(rport,
- bfa_fcs_rport_sm_plogi_sending);
- rport->plogi_retries = 0;
- bfa_fcs_rport_send_plogi(rport, NULL);
- }
- } else {
+ if (!bfa_fcs_lport_is_online(rport->port)) {
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
+ break;
+ }
+ if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ } else {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_send_plogi(rport, NULL);
}
break;
@@ -1001,7 +1095,11 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_DELETE:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+ if (rport->pid && (rport->prlo == BFA_TRUE))
+ bfa_fcs_rport_send_prlo_acc(rport);
+ if (rport->pid && (rport->prlo == BFA_FALSE))
+ bfa_fcs_rport_send_logo_acc(rport);
break;
case RPSM_EVENT_LOGO_IMP:
@@ -1040,7 +1138,14 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_LOGO_RCVD:
+ bfa_fcs_rport_send_logo_acc(rport);
case RPSM_EVENT_PRLO_RCVD:
+ if (rport->prlo == BFA_TRUE)
+ bfa_fcs_rport_send_prlo_acc(rport);
+
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+ break;
+
case RPSM_EVENT_ADDRESS_CHANGE:
break;
@@ -1072,7 +1177,11 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_LOGO_RCVD:
+ bfa_fcs_rport_send_logo_acc(rport);
case RPSM_EVENT_PRLO_RCVD:
+ if (rport->prlo == BFA_TRUE)
+ bfa_fcs_rport_send_prlo_acc(rport);
+
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_free(rport);
@@ -1126,9 +1235,9 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_COMP:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_timer_stop(&rport->timer);
- bfa_fcs_rport_hal_online(rport);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_PLOGI_SEND:
@@ -1190,9 +1299,9 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
- bfa_fcs_rport_hal_online(rport);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@@ -1254,9 +1363,9 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_timer_stop(&rport->timer);
- bfa_fcs_rport_hal_online(rport);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@@ -1296,6 +1405,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_fcs_rport_sm_nsdisc_sending);
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
+ rport->old_pid = rport->pid;
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -1343,9 +1453,9 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcxp_discard(rport->fcxp);
- bfa_fcs_rport_hal_online(rport);
+ bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@@ -1353,7 +1463,63 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
}
}
+/*
+ * Rport needs to be deleted;
+ * waiting for ITNIM cleanup to finish
+ */
+static void
+bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+ switch (event) {
+ case RPSM_EVENT_FC4_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+ bfa_fcs_rport_hal_offline(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ case RPSM_EVENT_PLOGI_RCVD:
+ /* Ignore these events */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ break;
+ }
+}
+
+/*
+ * Rport needs to be deleted;
+ * waiting for BFA/FW to finish current processing
+ */
+static void
+bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_HCB_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_PLOGI_RCVD:
+ /* Ignore these events */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
/*
* fcs_rport_private FCS RPORT private functions
@@ -1370,10 +1536,11 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
- bfa_fcs_rport_send_plogi, rport);
+ bfa_fcs_rport_send_plogi, rport, BFA_TRUE);
return;
}
rport->fcxp = fcxp;
@@ -1490,10 +1657,11 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->reply_oxid);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
- bfa_fcs_rport_send_plogiacc, rport);
+ bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE);
return;
}
rport->fcxp = fcxp;
@@ -1522,10 +1690,11 @@ bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
- bfa_fcs_rport_send_adisc, rport);
+ bfa_fcs_rport_send_adisc, rport, BFA_TRUE);
return;
}
rport->fcxp = fcxp;
@@ -1585,10 +1754,11 @@ bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pid);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
- bfa_fcs_rport_send_nsdisc, rport);
+ bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE);
return;
}
rport->fcxp = fcxp;
@@ -1741,10 +1911,11 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
port = rport->port;
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
- bfa_fcs_rport_send_logo, rport);
+ bfa_fcs_rport_send_logo, rport, BFA_FALSE);
return;
}
rport->fcxp = fcxp;
@@ -1778,7 +1949,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
port = rport->port;
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -1849,7 +2020,7 @@ bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
bfa_fcs_itnim_is_initiator(rport->itnim);
}
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -1886,7 +2057,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -1920,7 +2091,7 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
*/
if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -1957,6 +2128,15 @@ bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
bfa_rport_online(rport->bfa_rport, &rport_info);
}
+static void
+bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport)
+{
+ if (rport->bfa_rport)
+ bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ else
+ bfa_cb_rport_offline(rport);
+}
+
static struct bfa_fcs_rport_s *
bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
{
@@ -1967,6 +2147,11 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
/*
* allocate rport
*/
+ if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) {
+ bfa_trc(fcs, rpid);
+ return NULL;
+ }
+
if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
!= BFA_STATUS_OK) {
bfa_trc(fcs, rpid);
@@ -1981,16 +2166,9 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
rport->rp_drv = rport_drv;
rport->pid = rpid;
rport->pwwn = pwwn;
+ rport->old_pid = 0;
- /*
- * allocate BFA rport
- */
- rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
- if (!rport->bfa_rport) {
- bfa_trc(fcs, rpid);
- kfree(rport_drv);
- return NULL;
- }
+ rport->bfa_rport = NULL;
/*
* allocate FC-4s
@@ -2001,14 +2179,13 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
rport->itnim = bfa_fcs_itnim_create(rport);
if (!rport->itnim) {
bfa_trc(fcs, rpid);
- bfa_sm_send_event(rport->bfa_rport,
- BFA_RPORT_SM_DELETE);
kfree(rport_drv);
return NULL;
}
}
bfa_fcs_lport_add_rport(port, rport);
+ fcs->num_rport_logins++;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
@@ -2024,20 +2201,28 @@ static void
bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
+ struct bfa_fcs_s *fcs = port->fcs;
/*
* - delete FC-4s
* - delete BFA rport
* - remove from queue of rports
*/
+ rport->plogi_pending = BFA_FALSE;
+
if (bfa_fcs_lport_is_initiator(port)) {
bfa_fcs_itnim_delete(rport->itnim);
if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_offline(rport);
}
- bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
+ if (rport->bfa_rport) {
+ bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
+ rport->bfa_rport = NULL;
+ }
+
bfa_fcs_lport_del_rport(port, rport);
+ fcs->num_rport_logins--;
kfree(rport->rp_drv);
}
@@ -2071,7 +2256,18 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
}
static void
-bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
+bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport)
+{
+ if ((!rport->pid) || (!rport->pwwn)) {
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_sm_fault(rport->fcs, rport->pid);
+ }
+
+ bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE);
+}
+
+static void
+bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
@@ -2086,7 +2282,7 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
}
if (bfa_fcs_lport_is_initiator(port)) {
- bfa_fcs_itnim_rport_online(rport->itnim);
+ bfa_fcs_itnim_brp_online(rport->itnim);
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_online(rport);
};
@@ -2102,15 +2298,28 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
}
static void
-bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
+bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport)
+{
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_rport_offline(rport);
+
+ bfa_fcs_itnim_rport_offline(rport->itnim);
+}
+
+static void
+bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
char rpwwn_buf[BFA_STRING_32];
+ if (!rport->bfa_rport) {
+ bfa_fcs_rport_fcs_offline_action(rport);
+ return;
+ }
+
rport->stats.offlines++;
- rport->plogi_pending = BFA_FALSE;
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
@@ -2340,7 +2549,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_SCN);
}
-
/*
* brief
* This routine is the BFA callback for the bfa_rport_online() call.
@@ -2508,7 +2716,7 @@ bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
bfa_trc(rport->fcs, rport->pid);
- fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
@@ -2534,7 +2742,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
bfa_trc(rport->fcs, rx_fchs->s_id);
- fcxp = bfa_fcs_fcxp_alloc(rport->fcs);
+ fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE);
if (!fcxp)
return;
@@ -2582,6 +2790,17 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
}
+/*
+ * Called by BFAD to set the max limit on the number of bfa_fcs_rport
+ * allocations, which bounds the number of concurrent logins to remote ports
+ */
+void
+bfa_fcs_rport_set_max_logins(u32 max_logins)
+{
+ if (max_logins > 0)
+ bfa_fcs_rport_max_logins = max_logins;
+}
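
/*
 * Editorial sketch, not part of the patch: the intended configuration flow
 * for the new limit. max_rport_logins mirrors the bfad module parameter
 * added in bfad.c below; the wrapper function here is hypothetical.
 */
static u32 max_rport_logins = 1024;	/* BFA_FCS_MAX_RPORT_LOGINS */

static void demo_configure_rport_limit(void)
{
	/* a value of 0 is ignored by the setter above */
	bfa_fcs_rport_set_max_logins(max_rport_logins);
}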
+
void
bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
struct bfa_rport_attr_s *rport_attr)
@@ -2605,9 +2824,11 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
rport_attr->curr_speed = rport->rpf.rpsc_speed;
rport_attr->assigned_speed = rport->rpf.assigned_speed;
- qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
- qos_attr.qos_flow_id =
- cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+ if (rport->bfa_rport) {
+ qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+ qos_attr.qos_flow_id =
+ cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+ }
rport_attr->qos_attr = qos_attr;
rport_attr->trl_enforced = BFA_FALSE;
@@ -2940,10 +3161,11 @@ bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
- fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
- bfa_fcs_rpf_send_rpsc2, rpf);
+ bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE);
return;
}
rpf->fcxp = fcxp;
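
/*
 * A reading aid, assuming the conventions of bfa_cs.h (not introduced by
 * this patch): each rport state above is a plain function, and the state
 * machine stores the current state as a function pointer, so set_state
 * and send_event reduce to an assignment and an indirect call.
 */
typedef void (*bfa_sm_t)(void *sm, int event);

#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))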
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 8cdb79c2fcdf..7689872349f2 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -92,7 +92,6 @@ static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
-static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
@@ -599,8 +598,9 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
break;
case IOC_E_HWERROR:
+ case IOC_E_HWFAILED:
/*
- * HB failure notification, ignore.
+ * HB failure / HW error notification, ignore.
*/
break;
default:
@@ -632,6 +632,10 @@ bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
+ case IOC_E_HWERROR:
+ /* Ignore - already in hwfail state */
+ break;
+
default:
bfa_sm_fault(ioc, event);
}
@@ -1455,7 +1459,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
- if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
+ if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
bfa_trc(ioc, i);
bfa_trc(ioc, fwhdr->md5sum[i]);
bfa_trc(ioc, drv_fwhdr->md5sum[i]);
@@ -1480,7 +1484,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
- if (fwhdr.signature != drv_fwhdr->signature) {
+ if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
bfa_trc(ioc, fwhdr.signature);
bfa_trc(ioc, drv_fwhdr->signature);
return BFA_FALSE;
@@ -1704,7 +1708,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
- fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
+ cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
loff += sizeof(u32);
@@ -2260,6 +2264,12 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
+void
+bfa_ioc_suspend(struct bfa_ioc_s *ioc)
+{
+ ioc->dbg_fwsave_once = BFA_TRUE;
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
/*
* Initialize memory for saving firmware trace. Driver must initialize
@@ -2269,7 +2279,7 @@ void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
ioc->dbg_fwsave = dbg_fwsave;
- ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+ ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
}
/*
@@ -2856,7 +2866,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
/*
* Save firmware trace if configured.
*/
-static void
+void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
int tlen;
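
/*
 * Why the cpu_to_le32() conversions above are needed, as a minimal sketch
 * (the helper is illustrative, not from the driver): words read from the
 * firmware image are little-endian, so comparing or writing them in raw
 * CPU order only worked on little-endian hosts.
 */
#include <asm/byteorder.h>

static inline int fw_word_matches(u32 word_le_from_image, u32 word_cpu)
{
	return word_le_from_image == cpu_to_le32(word_cpu);
}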
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 1a99d4b5b50f..593ce6b2bad2 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -820,6 +820,7 @@ void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
void bfa_ioc_detach(struct bfa_ioc_s *ioc);
+void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
enum bfi_pcifn_class clscode);
void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
@@ -866,6 +867,7 @@ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
+void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
/*
* asic block configuration related APIs
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 2d36e4823835..189fff71e3c2 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -121,6 +121,7 @@ struct bfa_s {
bfa_boolean_t fcs; /* FCS is attached to BFA */
struct bfa_msix_s msix;
int bfa_aen_seq;
+ bfa_boolean_t intr_enabled; /* Status of interrupts */
};
extern bfa_boolean_t bfa_auto_recover;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 2e856e6710f7..b2538d60db34 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -440,9 +440,11 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
- INIT_LIST_HEAD(&mod->fcxp_free_q);
+ INIT_LIST_HEAD(&mod->fcxp_req_free_q);
+ INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
INIT_LIST_HEAD(&mod->fcxp_active_q);
- INIT_LIST_HEAD(&mod->fcxp_unused_q);
+ INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
+ INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
mod->fcxp_list = fcxp;
@@ -450,7 +452,14 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
fcxp->fcxp_mod = mod;
fcxp->fcxp_tag = i;
- list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+ if (i < (mod->num_fcxps / 2)) {
+ list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+ fcxp->req_rsp = BFA_TRUE;
+ } else {
+ list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+ fcxp->req_rsp = BFA_FALSE;
+ }
+
bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
fcxp->reqq_waiting = BFA_FALSE;
@@ -514,7 +523,8 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
if (!cfg->drvcfg.min_cfg)
mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
- INIT_LIST_HEAD(&mod->wait_q);
+ INIT_LIST_HEAD(&mod->req_wait_q);
+ INIT_LIST_HEAD(&mod->rsp_wait_q);
claim_fcxps_mem(mod);
}
@@ -542,7 +552,8 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
struct list_head *qe, *qen;
/* Enqueue unused fcxp resources to free_q */
- list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
+ list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
+ list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
fcxp = (struct bfa_fcxp_s *) qe;
@@ -559,11 +570,14 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
}
static struct bfa_fcxp_s *
-bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
{
struct bfa_fcxp_s *fcxp;
- bfa_q_deq(&fm->fcxp_free_q, &fcxp);
+ if (req)
+ bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
+ else
+ bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
if (fcxp)
list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
@@ -642,7 +656,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
struct bfa_fcxp_wqe_s *wqe;
- bfa_q_deq(&mod->wait_q, &wqe);
+ if (fcxp->req_rsp)
+ bfa_q_deq(&mod->req_wait_q, &wqe);
+ else
+ bfa_q_deq(&mod->rsp_wait_q, &wqe);
+
if (wqe) {
bfa_trc(mod->bfa, fcxp->fcxp_tag);
@@ -657,7 +675,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
list_del(&fcxp->qe);
- list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+
+ if (fcxp->req_rsp)
+ list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+ else
+ list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
static void
@@ -900,21 +922,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
* Address (given the sge index).
* @param[in] get_rsp_sglen function ptr to be called to get a response SG
* len (given the sge index).
+ * @param[in] req Whether the allocated FCXP is used to send a request
+ * (BFA_TRUE) or a response (BFA_FALSE)
*
* @return FCXP instance. NULL on failure.
*/
struct bfa_fcxp_s *
-bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
- int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
- bfa_fcxp_get_sglen_t req_sglen_cbfn,
- bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
- bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+ int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+ bfa_fcxp_get_sglen_t req_sglen_cbfn,
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
struct bfa_fcxp_s *fcxp = NULL;
WARN_ON(bfa == NULL);
- fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
+ fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
if (fcxp == NULL)
return NULL;
@@ -1071,17 +1095,20 @@ bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
}
void
-bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
void *caller, int nreq_sgles,
int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
bfa_fcxp_get_sglen_t req_sglen_cbfn,
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
- bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
- WARN_ON(!list_empty(&mod->fcxp_free_q));
+ if (req)
+ WARN_ON(!list_empty(&mod->fcxp_req_free_q));
+ else
+ WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
wqe->alloc_cbfn = alloc_cbfn;
wqe->alloc_cbarg = alloc_cbarg;
@@ -1094,7 +1121,10 @@ bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
wqe->rsp_sga_cbfn = rsp_sga_cbfn;
wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
- list_add_tail(&wqe->qe, &mod->wait_q);
+ if (req)
+ list_add_tail(&wqe->qe, &mod->req_wait_q);
+ else
+ list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}
void
@@ -1102,7 +1132,8 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
- WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
+ /* warn only when the wqe is on neither wait queue */
+ WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) &&
+ !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
list_del(&wqe->qe);
}
@@ -1153,8 +1184,13 @@ bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
int i;
for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
- bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
- list_add_tail(qe, &mod->fcxp_unused_q);
+ if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
+ bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
+ list_add_tail(qe, &mod->fcxp_req_unused_q);
+ } else {
+ bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
+ list_add_tail(qe, &mod->fcxp_rsp_unused_q);
+ }
}
}
@@ -1404,11 +1440,11 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
switch (event) {
case BFA_LPS_SM_FWRSP:
+ case BFA_LPS_SM_OFFLINE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_lps_logout_comp(lps);
break;
- case BFA_LPS_SM_OFFLINE:
case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
@@ -1786,6 +1822,8 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
if (lps->fdisc)
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+ else
+ bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
/*
@@ -4237,6 +4275,10 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
+ case BFA_RPORT_SM_OFFLINE:
+ bfa_rport_offline_cb(rp);
+ break;
+
default:
bfa_stats(rp, sm_off_unexp);
bfa_sm_fault(rp->bfa, event);
@@ -4353,6 +4395,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_offp_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ bfa_rport_offline_cb(rp);
break;
default:
@@ -4731,8 +4774,10 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
WARN_ON(speed == 0);
WARN_ON(speed == BFA_PORT_SPEED_AUTO);
- rport->rport_info.speed = speed;
- bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+ if (rport) {
+ rport->rport_info.speed = speed;
+ bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+ }
}
/* Set Rport LUN Mask */
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index f30067564639..1abcf7c51661 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -97,10 +97,13 @@ struct bfa_fcxp_mod_s {
struct bfa_s *bfa; /* backpointer to BFA */
struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
u16 num_fcxps; /* max num FCXP requests */
- struct list_head fcxp_free_q; /* free FCXPs */
- struct list_head fcxp_active_q; /* active FCXPs */
- struct list_head wait_q; /* wait queue for free fcxp */
- struct list_head fcxp_unused_q; /* unused fcxps */
+ struct list_head fcxp_req_free_q; /* free FCXPs used for sending req */
+ struct list_head fcxp_rsp_free_q; /* free FCXPs used for sending rsp */
+ struct list_head fcxp_active_q; /* active FCXPs */
+ struct list_head req_wait_q; /* wait queue for free req_fcxp */
+ struct list_head rsp_wait_q; /* wait queue for free rsp_fcxp */
+ struct list_head fcxp_req_unused_q; /* unused req_fcxps */
+ struct list_head fcxp_rsp_unused_q; /* unused rsp_fcxps */
u32 req_pld_sz;
u32 rsp_pld_sz;
struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
@@ -197,6 +200,7 @@ struct bfa_fcxp_s {
struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
struct bfa_reqq_wait_s reqq_wqe;
bfa_boolean_t reqq_waiting;
+ bfa_boolean_t req_rsp; /* Used to track req/rsp fcxp */
};
struct bfa_fcxp_wqe_s {
@@ -586,20 +590,22 @@ void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
/*
* bfa fcxp API functions
*/
-struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
+struct bfa_fcxp_s *bfa_fcxp_req_rsp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
int nreq_sgles, int nrsp_sgles,
bfa_fcxp_get_sgaddr_t get_req_sga,
bfa_fcxp_get_sglen_t get_req_sglen,
bfa_fcxp_get_sgaddr_t get_rsp_sga,
- bfa_fcxp_get_sglen_t get_rsp_sglen);
-void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+ bfa_fcxp_get_sglen_t get_rsp_sglen,
+ bfa_boolean_t req);
+void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_alloc_cbfn_t alloc_cbfn,
void *cbarg, void *bfad_fcxp,
int nreq_sgles, int nrsp_sgles,
bfa_fcxp_get_sgaddr_t get_req_sga,
bfa_fcxp_get_sglen_t get_req_sglen,
bfa_fcxp_get_sgaddr_t get_rsp_sga,
- bfa_fcxp_get_sglen_t get_rsp_sglen);
+ bfa_fcxp_get_sglen_t get_rsp_sglen,
+ bfa_boolean_t req);
void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
struct bfa_fcxp_wqe_s *wqe);
void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
@@ -658,6 +664,7 @@ u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
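
/*
 * Call-shape sketch for the split allocator declared above (argument
 * names are placeholders): request-path senders pass BFA_TRUE and
 * response/accept senders pass BFA_FALSE, so the two FCXP pools cannot
 * starve each other.
 */
fcxp = bfa_fcxp_req_rsp_alloc(bfad_fcxp, bfa, nreq_sgles, nrsp_sgles,
			      get_req_sga, get_req_sglen,
			      get_rsp_sga, get_rsp_sglen, BFA_TRUE);
if (!fcxp)
	bfa_fcxp_req_rsp_alloc_wait(bfa, &wqe, alloc_cbfn, cbarg, bfad_fcxp,
				    nreq_sgles, nrsp_sgles,
				    get_req_sga, get_req_sglen,
				    get_rsp_sga, get_rsp_sglen, BFA_TRUE);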
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2c8f0c713076..c37494916a1a 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -57,6 +57,7 @@ int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;
int max_xfer_size = BFAD_MAX_SECTORS >> 1;
+int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
/* Firmware releated */
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
@@ -148,6 +149,8 @@ MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
" Range[64k|128k|256k|512k|1024k|2048k]");
+module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -736,6 +739,9 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
}
}
+ /* Enable PCIe Advanced Error Reporting (AER) if the kernel supports it */
+ pci_enable_pcie_error_reporting(pdev);
+
bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
@@ -806,6 +812,8 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
}
}
+ pci_save_state(pdev);
+
return 0;
out_release_region:
@@ -822,6 +830,8 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
pci_iounmap(pdev, bfad->pci_bar0_kva);
pci_iounmap(pdev, bfad->pci_bar2_kva);
pci_release_regions(pdev);
+ /* Disable PCIe Advanced Error Reporting (AER) */
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -1258,6 +1268,16 @@ bfad_setup_intr(struct bfad_s *bfad)
error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
if (error) {
+ /* In CT1 & CT2, try to allocate just one vector */
+ if (bfa_asic_id_ctc(pdev->device)) {
+ printk(KERN_WARNING "bfa %s: failed to allocate "
+ "%d MSI-X vectors (err %d), trying one vector\n",
+ bfad->pci_name, bfad->nvec, error);
+ bfad->nvec = 1;
+ error = pci_enable_msix(bfad->pcidev,
+ msix_entries, bfad->nvec);
+ }
+
/*
* Only 'error' number of vectors is available.
* We don't have a mechanism to map multiple
* interrupts into one vector. Linux doesn't duplicate vectors
* in the MSIX table for this case.
* in the MSIX table for this case.
*/
-
- printk(KERN_WARNING "bfad%d: "
- "pci_enable_msix failed (%d),"
- " use line based.\n", bfad->inst_no, error);
-
- goto line_based;
+ if (error) {
+ printk(KERN_WARNING "bfad%d: "
+ "pci_enable_msix failed (%d), "
+ "use line based.\n",
+ bfad->inst_no, error);
+ goto line_based;
+ }
}
/* Disable INTX in MSI-X mode */
@@ -1470,6 +1491,197 @@ bfad_pci_remove(struct pci_dev *pdev)
kfree(bfad);
}
+/*
+ * PCI Error Recovery entry, error detected.
+ */
+static pci_ers_result_t
+bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+ unsigned long flags;
+ pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
+
+ dev_printk(KERN_ERR, &pdev->dev,
+ "error detected state: %d - flags: 0x%x\n",
+ state, bfad->bfad_flags);
+
+ switch (state) {
+ case pci_channel_io_normal: /* non-fatal error */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+ /* Suspend/fail all bfa operations */
+ bfa_ioc_suspend(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ del_timer_sync(&bfad->hal_tmo);
+ ret = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
+ case pci_channel_io_frozen: /* fatal error */
+ init_completion(&bfad->comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags |= BFAD_EEH_BUSY;
+ /* Suspend/fail all bfa operations */
+ bfa_ioc_suspend(&bfad->bfa.ioc);
+ bfa_fcs_stop(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
+ bfad_remove_intr(bfad);
+ del_timer_sync(&bfad->hal_tmo);
+ pci_disable_device(pdev);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ case pci_channel_io_perm_failure: /* PCI Card is DEAD */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags |= BFAD_EEH_BUSY |
+ BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* If the error_detected handler is called with the reason
+ * pci_channel_io_perm_failure - it will subsequently call
+ * pci_remove() entry point to remove the pci device from the
+ * system - So defer the cleanup to pci_remove(); cleaning up
+ * here causes inconsistent state during pci_remove().
+ */
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return ret;
+}
+
+int
+restart_bfa(struct bfad_s *bfad)
+{
+ unsigned long flags;
+ struct pci_dev *pdev = bfad->pcidev;
+
+ bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
+ &bfad->meminfo, &bfad->hal_pcidev);
+
+ /* Enable Interrupt and wait bfa_init completion */
+ if (bfad_setup_intr(bfad)) {
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "%s: bfad_setup_intr failed\n", bfad->pci_name);
+ bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
+ return -1;
+ }
+
+ init_completion(&bfad->comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_iocfc_init(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* Set up interrupt handler for each vectors */
+ if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+ bfad_install_msix_handler(bfad))
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "%s: install_msix failed.\n", bfad->pci_name);
+
+ bfad_init_timer(bfad);
+ wait_for_completion(&bfad->comp);
+ bfad_drv_start(bfad);
+
+ return 0;
+}
+
+/*
+ * PCI Error Recovery entry, re-initialize the chip.
+ */
+static pci_ers_result_t
+bfad_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+ u8 byte;
+
+ dev_printk(KERN_ERR, &pdev->dev,
+ "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
+
+ if (pci_enable_device(pdev)) {
+ dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
+ "PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_restore_state(pdev);
+
+ /*
+ * Read a config byte that can never legitimately be 0xff (e.g. the DMA
+ * max payload size) to make sure we did not hit another PCI error in
+ * the middle of recovery. If we did, declare permanent failure.
+ */
+ pci_read_config_byte(pdev, 0x68, &byte);
+ if (byte == 0xff) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "slot_reset failed ... got another PCI error !\n");
+ goto out_disable_device;
+ }
+
+ pci_save_state(pdev);
+ pci_set_master(pdev);
+
+ if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
+ if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
+ goto out_disable_device;
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (restart_bfa(bfad) == -1)
+ goto out_disable_device;
+
+ pci_enable_pcie_error_reporting(pdev);
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
+
+ return PCI_ERS_RESULT_RECOVERED;
+
+out_disable_device:
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static pci_ers_result_t
+bfad_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ unsigned long flags;
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+ dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
+
+ /* Fetch FW diagnostic information */
+ bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
+
+ /* Cancel all pending IOs */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfa_fcs_stop(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
+ bfad_remove_intr(bfad);
+ del_timer_sync(&bfad->hal_tmo);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static void
+bfad_pci_resume(struct pci_dev *pdev)
+{
+ unsigned long flags;
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+ dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
+
+ /* wait until the link is online */
+ bfad_rport_online_wait(bfad);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
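
/*
 * Summary of the recovery sequence the four handlers above implement
 * (this is the standard PCI error-recovery flow, noted for reading
 * convenience):
 *
 *   error_detected() - suspend the IOC, fail or flush outstanding work,
 *                      and return CAN_RECOVER, NEED_RESET or DISCONNECT
 *                      depending on the channel state
 *   mmio_enabled()   - save the firmware trace, quiesce, request a reset
 *   slot_reset()     - re-enable the device, restore config space, and
 *                      restart_bfa() to re-initialize the chip
 *   resume()         - wait for rports to come online, clear BFAD_EEH_BUSY
 */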
+
struct pci_device_id bfad_id_table[] = {
{
.vendor = BFA_PCI_VENDOR_ID_BROCADE,
@@ -1513,11 +1725,22 @@ struct pci_device_id bfad_id_table[] = {
MODULE_DEVICE_TABLE(pci, bfad_id_table);
+/*
+ * PCI error recovery handlers.
+ */
+static struct pci_error_handlers bfad_err_handler = {
+ .error_detected = bfad_pci_error_detected,
+ .slot_reset = bfad_pci_slot_reset,
+ .mmio_enabled = bfad_pci_mmio_enabled,
+ .resume = bfad_pci_resume,
+};
+
static struct pci_driver bfad_pci_driver = {
.name = BFAD_DRIVER_NAME,
.id_table = bfad_id_table,
.probe = bfad_pci_probe,
.remove = __devexit_p(bfad_pci_remove),
+ .err_handler = &bfad_err_handler,
};
/*
@@ -1546,6 +1769,7 @@ bfad_init(void)
bfa_auto_recover = ioc_auto_recover;
bfa_fcs_rport_set_del_timeout(rport_del_timeout);
+ bfa_fcs_rport_set_max_logins(max_rport_logins);
error = pci_register_driver(&bfad_pci_driver);
if (error) {
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index b83927440171..72f5dc32cc12 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -587,6 +587,37 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
+void
+bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
+{
+ struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *)vport->drv_port.im_port;
+ struct bfad_s *bfad = im_port->bfad;
+ struct Scsi_Host *vshost = vport->drv_port.im_port->shost;
+ char *sym_name = fc_vport->symbolic_name;
+ struct bfa_fcs_vport_s *fcs_vport;
+ wwn_t pwwn;
+ unsigned long flags;
+
+ u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (strlen(sym_name) > 0) {
+ strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name);
+ bfa_fcs_lport_ns_util_send_rspn_id(
+ BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
struct fc_function_template bfad_im_fc_function_template = {
/* Target dynamic attributes */
@@ -640,6 +671,7 @@ struct fc_function_template bfad_im_fc_function_template = {
.vport_create = bfad_im_vport_create,
.vport_delete = bfad_im_vport_delete,
.vport_disable = bfad_im_vport_disable,
+ .set_vport_symbolic_name = bfad_im_vport_set_symbolic_name,
.bsg_request = bfad_im_bsg_request,
.bsg_timeout = bfad_im_bsg_timeout,
};
@@ -792,6 +824,13 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 16Gbps PCIe dual port FC HBA");
+ } else if (!strcmp(model, "Brocade-1867")) {
+ if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe single port FC HBA for IBM");
+ else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe dual port FC HBA for IBM");
} else
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
@@ -909,15 +948,16 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
struct bfad_port_s *port = im_port->port;
struct bfad_s *bfad = im_port->bfad;
int nrports = 2048;
- wwn_t *rports = NULL;
+ struct bfa_rport_qualifier_s *rports = NULL;
unsigned long flags;
- rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
+ rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports,
+ GFP_ATOMIC);
if (rports == NULL)
return snprintf(buf, PAGE_SIZE, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
- bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
+ bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 9c1495b321d9..0afa39076cef 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -535,7 +535,8 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
if (bfad_chk_iocmd_sz(payload_len,
sizeof(struct bfa_bsg_lport_get_rports_s),
- sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
+ sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
+ != BFA_STATUS_OK) {
iocmd->status = BFA_STATUS_VERSION_FAIL;
return 0;
}
@@ -552,8 +553,9 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
goto out;
}
- bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
- &iocmd->nrports);
+ bfa_fcs_lport_get_rport_quals(fcs_port,
+ (struct bfa_rport_qualifier_s *)iocmd_bufptr,
+ &iocmd->nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
out:
@@ -578,7 +580,11 @@ bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
goto out;
}
- fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (iocmd->pid)
+ fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
+ iocmd->rpwwn, iocmd->pid);
+ else
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
if (fcs_rport == NULL) {
bfa_trc(bfad, 0);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -671,9 +677,11 @@ bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
sizeof(struct bfa_rport_stats_s));
- memcpy((void *)&iocmd->stats.hal_stats,
- (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
- sizeof(struct bfa_rport_hal_stats_s));
+ if (bfa_fcs_rport_get_halrport(fcs_rport)) {
+ memcpy((void *)&iocmd->stats.hal_stats,
+ (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+ sizeof(struct bfa_rport_hal_stats_s));
+ }
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
@@ -709,7 +717,8 @@ bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
rport = bfa_fcs_rport_get_halrport(fcs_rport);
- memset(&rport->stats, 0, sizeof(rport->stats));
+ if (rport)
+ memset(&rport->stats, 0, sizeof(rport->stats));
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
out:
@@ -744,7 +753,8 @@ bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
fcs_rport->rpf.assigned_speed = iocmd->speed;
/* Set this speed in f/w only if the RPSC speed is not available */
if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
- bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+ if (fcs_rport->bfa_rport)
+ bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
out:
@@ -1030,9 +1040,10 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
else {
iocmd->status = BFA_STATUS_OK;
- memcpy((void *)&iocmd->iostats, (void *)
- &(bfa_fcs_itnim_get_halitn(itnim)->stats),
- sizeof(struct bfa_itnim_iostats_s));
+ if (bfa_fcs_itnim_get_halitn(itnim))
+ memcpy((void *)&iocmd->iostats, (void *)
+ &(bfa_fcs_itnim_get_halitn(itnim)->stats),
+ sizeof(struct bfa_itnim_iostats_s));
}
}
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2949,13 +2960,13 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
spin_lock_irqsave(&bfad->bfad_lock, flags);
/* Allocate bfa_fcxp structure */
- hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
+ hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
drv_fcxp->num_req_sgles,
drv_fcxp->num_rsp_sgles,
bfad_fcxp_get_req_sgaddr_cb,
bfad_fcxp_get_req_sglen_cb,
bfad_fcxp_get_rsp_sgaddr_cb,
- bfad_fcxp_get_rsp_sglen_cb);
+ bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
if (!hal_fcxp) {
bfa_trc(bfad, 0);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 17ad67283130..8c569ddb750d 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -319,6 +319,8 @@ struct bfa_bsg_rport_attr_s {
u16 vf_id;
wwn_t pwwn;
wwn_t rpwwn;
+ u32 pid;
+ u32 rsvd;
struct bfa_rport_attr_s attr;
};
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 7f74f1d19124..1840651ce1d4 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
+#include <linux/aer.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
@@ -56,7 +57,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.0.23.0"
+#define BFAD_DRIVER_VERSION "3.1.2.0"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
@@ -81,6 +82,8 @@
#define BFAD_FC4_PROBE_DONE 0x00000200
#define BFAD_PORT_DELETE 0x00000001
#define BFAD_INTX_ON 0x00000400
+#define BFAD_EEH_BUSY 0x00000800
+#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE 0x00001000
/*
* BFAD related definition
*/
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 2eebf8d4d58b..8f92732655c7 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1216,6 +1216,15 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
return 0;
}
+ if (bfad->bfad_flags & BFAD_EEH_BUSY) {
+ if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
+ cmnd->result = DID_NO_CONNECT << 16;
+ else
+ cmnd->result = DID_REQUEUE << 16;
+ done(cmnd);
+ return 0;
+ }
+
sg_cnt = scsi_dma_map(cmnd);
if (sg_cnt < 0)
return SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 73f231ccd45b..8d4626c07a12 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1807,7 +1807,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
}
- memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
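
/*
 * Why the one-line bnx2fc fix above matters, as a standalone sketch
 * (struct demo_cmd is hypothetical): sense_buffer is a pointer member,
 * so the old sizeof() cleared only 4 or 8 bytes instead of the full
 * 96-byte sense buffer.
 */
#include <string.h>

#define SCSI_SENSE_BUFFERSIZE 96

struct demo_cmd {
	unsigned char *sense_buffer;	/* points at a 96-byte buffer */
};

static void demo_clear_sense(struct demo_cmd *sc)
{
	/* buggy form: memset(sc->sense_buffer, 0, sizeof(sc->sense_buffer)) */
	memset(sc->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}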
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 450e011f981a..76e4c039f0d5 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1422,7 +1422,8 @@ static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
-"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
+"DID_NEXUS_FAILURE" };
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
static const char * const driverbyte_table[]={
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 08d80a6d272a..6f4d8e6f32f1 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -641,8 +641,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
h->state = TPGS_STATE_STANDBY;
break;
case TPGS_STATE_OFFLINE:
- case TPGS_STATE_UNAVAILABLE:
- /* Path unusable for unavailable/offline */
+ /* Path unusable */
err = SCSI_DH_DEV_OFFLINED;
break;
default:
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 20c4557f5abd..69c915aa77c2 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -790,29 +790,19 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1815"},
{"IBM", "1818"},
{"IBM", "3526"},
- {"SGI", "TP9400"},
- {"SGI", "TP9500"},
- {"SGI", "TP9700"},
+ {"SGI", "TP9"},
{"SGI", "IS"},
{"STK", "OPENstorage D280"},
- {"SUN", "CSM200_R"},
- {"SUN", "LCSM100_I"},
- {"SUN", "LCSM100_S"},
- {"SUN", "LCSM100_E"},
- {"SUN", "LCSM100_F"},
- {"DELL", "MD3000"},
- {"DELL", "MD3000i"},
- {"DELL", "MD32xx"},
- {"DELL", "MD32xxi"},
- {"DELL", "MD36xxi"},
- {"DELL", "MD36xxf"},
- {"LSI", "INF-01-00"},
- {"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
- {"SUN", "CSM100_R_FC"},
+ {"SUN", "CSM"},
+ {"SUN", "LCSM100"},
{"SUN", "STK6580_6780"},
{"SUN", "SUN_6180"},
{"SUN", "ArrayStorage"},
+ {"DELL", "MD3"},
+ {"NETAPP", "INF-01-00"},
+ {"LSI", "INF-01-00"},
+ {"ENGENIO", "INF-01-00"},
{NULL, NULL},
};
@@ -863,7 +853,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
RDAC_NAME);
- return 0;
+ return -ENOMEM;
}
scsi_dh_data->scsi_dh = &rdac_dh;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 796482badf13..c425d7b4957e 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -99,6 +99,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -118,13 +127,22 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
- {0x3350103C, "Smart Array", &SA5_access},
- {0x3351103C, "Smart Array", &SA5_access},
- {0x3352103C, "Smart Array", &SA5_access},
- {0x3353103C, "Smart Array", &SA5_access},
- {0x3354103C, "Smart Array", &SA5_access},
- {0x3355103C, "Smart Array", &SA5_access},
- {0x3356103C, "Smart Array", &SA5_access},
+ {0x3350103C, "Smart Array P222", &SA5_access},
+ {0x3351103C, "Smart Array P420", &SA5_access},
+ {0x3352103C, "Smart Array P421", &SA5_access},
+ {0x3353103C, "Smart Array P822", &SA5_access},
+ {0x3354103C, "Smart Array P420i", &SA5_access},
+ {0x3355103C, "Smart Array P220i", &SA5_access},
+ {0x3356103C, "Smart Array P721m", &SA5_access},
+ {0x1920103C, "Smart Array", &SA5_access},
+ {0x1921103C, "Smart Array", &SA5_access},
+ {0x1922103C, "Smart Array", &SA5_access},
+ {0x1923103C, "Smart Array", &SA5_access},
+ {0x1924103C, "Smart Array", &SA5_access},
+ {0x1925103C, "Smart Array", &SA5_access},
+ {0x1926103C, "Smart Array", &SA5_access},
+ {0x1928103C, "Smart Array", &SA5_access},
+ {0x334d103C, "Smart Array P822se", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -2609,7 +2627,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
/* not in reqQ, if also not in cmpQ, must have already completed */
found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
if (!found) {
- dev_dbg(&h->pdev->dev, "%s Request FAILED (not known to driver).\n",
+ dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
msg);
return SUCCESS;
}
@@ -3265,7 +3283,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.Timeout = 0; /* Don't time out */
memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
c->Request.CDB[0] = cmd;
- c->Request.CDB[1] = 0x03; /* Reset target above */
+ c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
/* If bytes 4-7 are zero, it means reset the */
/* LunID device */
c->Request.CDB[4] = 0x00;
@@ -3337,7 +3355,8 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
{
ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap(page_base, page_offs + size);
+ void __iomem *page_remapped = ioremap_nocache(page_base,
+ page_offs + size);
return page_remapped ? (page_remapped + page_offs) : NULL;
}
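
/*
 * The distinction behind the hpsa change above, as a sketch (the call
 * shape is illustrative; hpsa computes page-aligned base and offset
 * itself): register BARs must be mapped uncached so every access reaches
 * the device rather than a CPU cache line.
 */
#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *demo_map_bar0(struct pci_dev *pdev)
{
	return ioremap_nocache(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
}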
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index ff5b5c5538ee..cb150d1e5850 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,7 +1,3 @@
-obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
-
-ibmvscsic-y += ibmvscsi.o
-ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
-
+obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 134a0ae85bb7..5e8d51bd03de 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2242,6 +2242,21 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
}
/**
+ * ibmvfc_match_evt - Match function for specified event
+ * @evt: ibmvfc event struct
+ * @match: event to match
+ *
+ * Returns:
+ * 1 if the event matches @match / 0 otherwise
+ **/
+static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
+{
+ if (evt == match)
+ return 1;
+ return 0;
+}
+
+/**
* ibmvfc_abort_task_set - Abort outstanding commands to the device
* @sdev: scsi device to abort commands
*
@@ -2322,7 +2337,20 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
if (rc) {
sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
ibmvfc_reset_host(vhost);
- rsp_rc = 0;
+ rsp_rc = -EIO;
+ rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+
+ if (rc == SUCCESS)
+ rsp_rc = 0;
+
+ rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
+ if (rc != SUCCESS) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_hard_reset_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ rsp_rc = 0;
+ }
+
goto out;
}
}
@@ -2597,8 +2625,10 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
case IBMVFC_AE_SCN_FABRIC:
case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
- vhost->delay_init = 1;
- __ibmvfc_reset_host(vhost);
+ if (vhost->state < IBMVFC_HALTED) {
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
+ }
break;
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 834c37fc7ce9..3be8af624e6f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.9"
-#define IBMVFC_DRIVER_DATE "(August 5, 2010)"
+#define IBMVFC_DRIVER_VERSION "1.0.10"
+#define IBMVFC_DRIVER_DATE "(August 24, 2012)"
#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 3a6c4742951e..ef9a54c7da67 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,13 +93,13 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
+static char partition_name[97] = "UNKNOWN";
+static unsigned int partition_number = -1;
static struct scsi_transport_template *ibmvscsi_transport_template;
#define IBMVSCSI_VERSION "1.5.9"
-static struct ibmvscsi_ops *ibmvscsi_ops;
-
MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
@@ -118,6 +118,316 @@ MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
module_param_named(client_reserve, client_reserve, int, S_IRUGO );
MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+ struct ibmvscsi_host_data *hostdata);
+
+/* ------------------------------------------------------------
+ * Routines for managing the command/response queue
+ */
+/**
+ * ibmvscsi_handle_event: - Interrupt handler for crq events
+ * @irq: number of irq to handle, not used
+ * @dev_instance: ibmvscsi_host_data of host that received interrupt
+ *
+ * Disables interrupts and schedules srp_task
+ * Always returns IRQ_HANDLED
+ */
+static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
+{
+ struct ibmvscsi_host_data *hostdata =
+ (struct ibmvscsi_host_data *)dev_instance;
+ vio_disable_interrupts(to_vio_dev(hostdata->dev));
+ tasklet_schedule(&hostdata->srp_task);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ibmvscsi_release_crq_queue: - Deallocates data and unregisters CRQ
+ * @queue: crq_queue to release
+ * @host_data: ibmvscsi_host_data of host
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ */
+static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata,
+ int max_requests)
+{
+ long rc = 0;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ free_irq(vdev->irq, (void *)hostdata);
+ tasklet_kill(&hostdata->srp_task);
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+ dma_unmap_single(hostdata->dev,
+ queue->msg_token,
+ queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+ free_page((unsigned long)queue->msgs);
+}
+
+/**
+ * crq_queue_next_crq: - Returns the next entry in message queue
+ * @queue: crq_queue to use
+ *
+ * Returns pointer to next entry in queue, or NULL if there are no new
+ * entries in the CRQ.
+ */
+static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+{
+ struct viosrp_crq *crq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ crq = &queue->msgs[queue->cur];
+ if (crq->valid & 0x80) {
+ if (++queue->cur == queue->size)
+ queue->cur = 0;
+ } else
+ crq = NULL;
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ return crq;
+}
+
+/**
+ * ibmvscsi_send_crq: - Send a CRQ
+ * @hostdata: the adapter
+ * @word1: the first 64 bits of the data
+ * @word2: the second 64 bits of the data
+ */
+static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+ u64 word1, u64 word2)
+{
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvscsi_task: - Process srps asynchronously
+ * @data: ibmvscsi_host_data of host
+ */
+static void ibmvscsi_task(void *data)
+{
+ struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ struct viosrp_crq *crq;
+ int done = 0;
+
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
+ ibmvscsi_handle_crq(crq, hostdata);
+ crq->valid = 0x00;
+ }
+
+ vio_enable_interrupts(vdev);
+ crq = crq_queue_next_crq(&hostdata->queue);
+ if (crq != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvscsi_handle_crq(crq, hostdata);
+ crq->valid = 0x00;
+ } else {
+ done = 1;
+ }
+ }
+}
+
+static void gather_partition_info(void)
+{
+ struct device_node *rootdn;
+
+ const char *ppartition_name;
+ const unsigned int *p_number_ptr;
+
+ /* Retrieve information about this partition */
+ rootdn = of_find_node_by_path("/");
+ if (!rootdn) {
+ return;
+ }
+
+ ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ if (ppartition_name)
+ strncpy(partition_name, ppartition_name,
+ sizeof(partition_name));
+ p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
+ if (p_number_ptr)
+ partition_number = *p_number_ptr;
+ of_node_put(rootdn);
+}
+
+static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+ memset(&hostdata->madapter_info, 0x00,
+ sizeof(hostdata->madapter_info));
+
+ dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
+ strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
+
+ strncpy(hostdata->madapter_info.partition_name, partition_name,
+ sizeof(hostdata->madapter_info.partition_name));
+
+ hostdata->madapter_info.partition_number = partition_number;
+
+ hostdata->madapter_info.mad_version = 1;
+ hostdata->madapter_info.os_type = 2;
+}
+
+/**
+ * ibmvscsi_reset_crq_queue: - resets a crq after a failure
+ * @queue: crq_queue to reset
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata)
+{
+ int rc = 0;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ /* Close the CRQ */
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+ /* Clean out the queue */
+ memset(queue->msgs, 0x00, PAGE_SIZE);
+ queue->cur = 0;
+
+ set_adapter_info(hostdata);
+
+ /* And re-open it again */
+ rc = plpar_hcall_norets(H_REG_CRQ,
+ vdev->unit_address,
+ queue->msg_token, PAGE_SIZE);
+ if (rc == 2) {
+ /* Adapter is good, but other end is not ready */
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
+ } else if (rc != 0) {
+ dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
+ }
+ return rc;
+}
+
+/**
+ * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ * Returns zero on success.
+ */
+static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata,
+ int max_requests)
+{
+ int rc;
+ int retrc;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+
+ if (!queue->msgs)
+ goto malloc_failed;
+ queue->size = PAGE_SIZE / sizeof(*queue->msgs);
+
+ queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
+ queue->size * sizeof(*queue->msgs),
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(hostdata->dev, queue->msg_token))
+ goto map_failed;
+
+ gather_partition_info();
+ set_adapter_info(hostdata);
+
+ retrc = rc = plpar_hcall_norets(H_REG_CRQ,
+ vdev->unit_address,
+ queue->msg_token, PAGE_SIZE);
+ if (rc == H_RESOURCE)
+ /* maybe kexecing and resource is busy. try a reset */
+ rc = ibmvscsi_reset_crq_queue(queue,
+ hostdata);
+
+ if (rc == 2) {
+ /* Adapter is good, but other end is not ready */
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
+ retrc = 0;
+ } else if (rc != 0) {
+ dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
+ goto reg_crq_failed;
+ }
+
+ queue->cur = 0;
+ spin_lock_init(&queue->lock);
+
+ tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
+ (unsigned long)hostdata);
+
+ if (request_irq(vdev->irq,
+ ibmvscsi_handle_event,
+ 0, "ibmvscsi", (void *)hostdata) != 0) {
+ dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
+ vdev->irq);
+ goto req_irq_failed;
+ }
+
+ rc = vio_enable_interrupts(vdev);
+ if (rc != 0) {
+ dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
+ goto req_irq_failed;
+ }
+
+ return retrc;
+
+ req_irq_failed:
+ tasklet_kill(&hostdata->srp_task);
+ rc = 0;
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+ reg_crq_failed:
+ dma_unmap_single(hostdata->dev,
+ queue->msg_token,
+ queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+ map_failed:
+ free_page((unsigned long)queue->msgs);
+ malloc_failed:
+ return -1;
+}
+
+/**
+ * ibmvscsi_reenable_crq_queue: - reenables a crq after a failure
+ * @queue: crq_queue to reenable
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata)
+{
+ int rc = 0;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ /* Re-enable the CRQ */
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+ } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+ if (rc)
+ dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
+ return rc;
+}
+
/* ------------------------------------------------------------
* Routines for the event pool and event structs
*/
@@ -611,7 +921,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
}
if ((rc =
- ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+ ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
list_del(&evt_struct->list);
del_timer(&evt_struct->timer);
@@ -1420,8 +1730,8 @@ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
* @hostdata: ibmvscsi_host_data of host
*
*/
-void ibmvscsi_handle_crq(struct viosrp_crq *crq,
- struct ibmvscsi_host_data *hostdata)
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+ struct ibmvscsi_host_data *hostdata)
{
long rc;
unsigned long flags;
@@ -1433,8 +1743,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
case 0x01: /* Initialization message */
dev_info(hostdata->dev, "partner initialized\n");
/* Send back a response */
- if ((rc = ibmvscsi_ops->send_crq(hostdata,
- 0xC002000000000000LL, 0)) == 0) {
+ rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
+ if (rc == 0) {
/* Now login */
init_adapter(hostdata);
} else {
@@ -1541,6 +1851,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
host_config = &evt_struct->iu.mad.host_config;
+ /* The transport length field is only 16-bit */
+ length = min(0xffff, length);
+
/* Set up a lun reset SRP command */
memset(host_config, 0x00, sizeof(*host_config));
host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
@@ -1840,17 +2153,17 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
smp_rmb();
hostdata->reset_crq = 0;
- rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
+ rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
if (!rc)
- rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+ rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
vio_enable_interrupts(to_vio_dev(hostdata->dev));
} else if (hostdata->reenable_crq) {
smp_rmb();
action = "enable";
- rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
+ rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
hostdata->reenable_crq = 0;
if (!rc)
- rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+ rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
} else
return;
@@ -1944,7 +2257,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto init_crq_failed;
}
- rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
+ rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
if (rc != 0 && rc != H_RESOURCE) {
dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
goto kill_kthread;
@@ -1974,7 +2287,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
* to fail if the other end is not active. In that case we don't
* want to scan
*/
- if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+ if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
|| rc == H_RESOURCE) {
/*
* Wait around max init_timeout secs for the adapter to finish
@@ -2002,7 +2315,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
add_host_failed:
release_event_pool(&hostdata->pool, hostdata);
init_pool_failed:
- ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
+ ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
kill_kthread:
kthread_stop(hostdata->work_thread);
init_crq_failed:
@@ -2018,7 +2331,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
- ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
+ ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
kthread_stop(hostdata->work_thread);
@@ -2039,7 +2352,10 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
static int ibmvscsi_resume(struct device *dev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
- return ibmvscsi_ops->resume(hostdata);
+ vio_disable_interrupts(to_vio_dev(hostdata->dev));
+ tasklet_schedule(&hostdata->srp_task);
+
+ return 0;
}
/**
@@ -2076,9 +2392,7 @@ int __init ibmvscsi_module_init(void)
driver_template.can_queue = max_requests;
max_events = max_requests + 2;
- if (firmware_has_feature(FW_FEATURE_VIO))
- ibmvscsi_ops = &rpavscsi_ops;
- else
+ if (!firmware_has_feature(FW_FEATURE_VIO))
return -ENODEV;
ibmvscsi_transport_template =
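
Several of the CRQ helpers folded into ibmvscsi.c above (release, reset, reenable) share one idiom: reissue the hcall, sleeping 100 ms between attempts, until the hypervisor stops answering busy. A self-contained sketch of that retry loop, with a fake hcall standing in for plpar_hcall_norets():

#include <stdio.h>
#include <unistd.h>

enum { H_SUCCESS = 0, H_BUSY = 1 };

static int attempts;

/* stand-in for plpar_hcall_norets(H_FREE_CRQ, ...): busy twice, then ok */
static long fake_hcall_free_crq(unsigned long unit_address)
{
	(void)unit_address;
	return ++attempts < 3 ? H_BUSY : H_SUCCESS;
}

/* the loop shape shared by the release/reset/reenable helpers */
static long free_crq_retry(unsigned long unit_address)
{
	long rc = 0;

	do {
		if (rc)                       /* back off before retrying */
			usleep(100 * 1000);   /* models the driver's msleep(100) */
		rc = fake_hcall_free_crq(unit_address);
	} while (rc == H_BUSY);

	return rc;
}

int main(void)
{
	printf("rc=%ld after %d attempts\n",
	       free_crq_retry(0x30000002), attempts);
	return 0;
}
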
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index c503e1776014..7d64867c5dd1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -107,26 +107,4 @@ struct ibmvscsi_host_data {
dma_addr_t adapter_info_addr;
};
-/* routines for managing a command/response queue */
-void ibmvscsi_handle_crq(struct viosrp_crq *crq,
- struct ibmvscsi_host_data *hostdata);
-
-struct ibmvscsi_ops {
- int (*init_crq_queue)(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests);
- void (*release_crq_queue)(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests);
- int (*reset_crq_queue)(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata);
- int (*reenable_crq_queue)(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata);
- int (*send_crq)(struct ibmvscsi_host_data *hostdata,
- u64 word1, u64 word2);
- int (*resume) (struct ibmvscsi_host_data *hostdata);
-};
-
-extern struct ibmvscsi_ops rpavscsi_ops;
-
#endif /* IBMVSCSI_H */
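
With rpa_vscsi.c gone there is only one backend, so the ibmvscsi_ops function-pointer table deleted above buys nothing; direct calls are simpler and visible to the compiler. A toy before/after illustration (all names here are invented):

#include <stdio.h>

struct host { unsigned long unit; };

/* before: every call bounced through an ops struct */
struct vscsi_ops {
	int (*send_crq)(struct host *h, unsigned long w1, unsigned long w2);
};

static int backend_send_crq(struct host *h, unsigned long w1, unsigned long w2)
{
	printf("send_crq unit=%lx w1=%lx w2=%lx\n", h->unit, w1, w2);
	return 0;
}

static struct vscsi_ops ops = { .send_crq = backend_send_crq };

int main(void)
{
	struct host h = { .unit = 0x30000002 };

	ops.send_crq(&h, 0xC001000000000000UL, 0);     /* old: indirect */
	backend_send_crq(&h, 0xC001000000000000UL, 0); /* new: direct */
	return 0;
}
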
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
deleted file mode 100644
index f48ae0190d95..000000000000
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/* ------------------------------------------------------------
- * rpa_vscsi.c
- * (C) Copyright IBM Corporation 1994, 2003
- * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
- * Santiago Leon (santil@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
- * ------------------------------------------------------------
- * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
- *
- * This driver allows the Linux SCSI peripheral drivers to directly
- * access devices in the hosting partition, either on an iSeries
- * hypervisor system or a converged hypervisor system.
- */
-
-#include <asm/vio.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/hvcall.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/interrupt.h>
-#include "ibmvscsi.h"
-
-static char partition_name[97] = "UNKNOWN";
-static unsigned int partition_number = -1;
-
-/* ------------------------------------------------------------
- * Routines for managing the command/response queue
- */
-/**
- * rpavscsi_handle_event: - Interrupt handler for crq events
- * @irq: number of irq to handle, not used
- * @dev_instance: ibmvscsi_host_data of host that received interrupt
- *
- * Disables interrupts and schedules srp_task
- * Always returns IRQ_HANDLED
- */
-static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
-{
- struct ibmvscsi_host_data *hostdata =
- (struct ibmvscsi_host_data *)dev_instance;
- vio_disable_interrupts(to_vio_dev(hostdata->dev));
- tasklet_schedule(&hostdata->srp_task);
- return IRQ_HANDLED;
-}
-
-/**
- * release_crq_queue: - Deallocates data and unregisters CRQ
- * @queue: crq_queue to initialize and register
- * @host_data: ibmvscsi_host_data of host
- *
- * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
- * the crq with the hypervisor.
- */
-static void rpavscsi_release_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests)
-{
- long rc = 0;
- struct vio_dev *vdev = to_vio_dev(hostdata->dev);
- free_irq(vdev->irq, (void *)hostdata);
- tasklet_kill(&hostdata->srp_task);
- do {
- if (rc)
- msleep(100);
- rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
- dma_unmap_single(hostdata->dev,
- queue->msg_token,
- queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
- free_page((unsigned long)queue->msgs);
-}
-
-/**
- * crq_queue_next_crq: - Returns the next entry in message queue
- * @queue: crq_queue to use
- *
- * Returns pointer to next entry in queue, or NULL if there are no new
- * entried in the CRQ.
- */
-static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
-{
- struct viosrp_crq *crq;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->lock, flags);
- crq = &queue->msgs[queue->cur];
- if (crq->valid & 0x80) {
- if (++queue->cur == queue->size)
- queue->cur = 0;
- } else
- crq = NULL;
- spin_unlock_irqrestore(&queue->lock, flags);
-
- return crq;
-}
-
-/**
- * rpavscsi_send_crq: - Send a CRQ
- * @hostdata: the adapter
- * @word1: the first 64 bits of the data
- * @word2: the second 64 bits of the data
- */
-static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
- u64 word1, u64 word2)
-{
- struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
- return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
-}
-
-/**
- * rpavscsi_task: - Process srps asynchronously
- * @data: ibmvscsi_host_data of host
- */
-static void rpavscsi_task(void *data)
-{
- struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
- struct vio_dev *vdev = to_vio_dev(hostdata->dev);
- struct viosrp_crq *crq;
- int done = 0;
-
- while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
- ibmvscsi_handle_crq(crq, hostdata);
- crq->valid = 0x00;
- }
-
- vio_enable_interrupts(vdev);
- if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
- vio_disable_interrupts(vdev);
- ibmvscsi_handle_crq(crq, hostdata);
- crq->valid = 0x00;
- } else {
- done = 1;
- }
- }
-}
-
-static void gather_partition_info(void)
-{
- struct device_node *rootdn;
-
- const char *ppartition_name;
- const unsigned int *p_number_ptr;
-
- /* Retrieve information about this partition */
- rootdn = of_find_node_by_path("/");
- if (!rootdn) {
- return;
- }
-
- ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
- if (ppartition_name)
- strncpy(partition_name, ppartition_name,
- sizeof(partition_name));
- p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
- if (p_number_ptr)
- partition_number = *p_number_ptr;
- of_node_put(rootdn);
-}
-
-static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
-{
- memset(&hostdata->madapter_info, 0x00,
- sizeof(hostdata->madapter_info));
-
- dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
- strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
-
- strncpy(hostdata->madapter_info.partition_name, partition_name,
- sizeof(hostdata->madapter_info.partition_name));
-
- hostdata->madapter_info.partition_number = partition_number;
-
- hostdata->madapter_info.mad_version = 1;
- hostdata->madapter_info.os_type = 2;
-}
-
-/**
- * reset_crq_queue: - resets a crq after a failure
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata)
-{
- int rc = 0;
- struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
- /* Close the CRQ */
- do {
- if (rc)
- msleep(100);
- rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-
- /* Clean out the queue */
- memset(queue->msgs, 0x00, PAGE_SIZE);
- queue->cur = 0;
-
- set_adapter_info(hostdata);
-
- /* And re-open it again */
- rc = plpar_hcall_norets(H_REG_CRQ,
- vdev->unit_address,
- queue->msg_token, PAGE_SIZE);
- if (rc == 2) {
- /* Adapter is good, but other end is not ready */
- dev_warn(hostdata->dev, "Partner adapter not ready\n");
- } else if (rc != 0) {
- dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
- }
- return rc;
-}
-
-/**
- * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
- *
- * Allocates a page for messages, maps it for dma, and registers
- * the crq with the hypervisor.
- * Returns zero on success.
- */
-static int rpavscsi_init_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests)
-{
- int rc;
- int retrc;
- struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
- queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
-
- if (!queue->msgs)
- goto malloc_failed;
- queue->size = PAGE_SIZE / sizeof(*queue->msgs);
-
- queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
- queue->size * sizeof(*queue->msgs),
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(hostdata->dev, queue->msg_token))
- goto map_failed;
-
- gather_partition_info();
- set_adapter_info(hostdata);
-
- retrc = rc = plpar_hcall_norets(H_REG_CRQ,
- vdev->unit_address,
- queue->msg_token, PAGE_SIZE);
- if (rc == H_RESOURCE)
- /* maybe kexecing and resource is busy. try a reset */
- rc = rpavscsi_reset_crq_queue(queue,
- hostdata);
-
- if (rc == 2) {
- /* Adapter is good, but other end is not ready */
- dev_warn(hostdata->dev, "Partner adapter not ready\n");
- retrc = 0;
- } else if (rc != 0) {
- dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
- goto reg_crq_failed;
- }
-
- queue->cur = 0;
- spin_lock_init(&queue->lock);
-
- tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
- (unsigned long)hostdata);
-
- if (request_irq(vdev->irq,
- rpavscsi_handle_event,
- 0, "ibmvscsi", (void *)hostdata) != 0) {
- dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
- vdev->irq);
- goto req_irq_failed;
- }
-
- rc = vio_enable_interrupts(vdev);
- if (rc != 0) {
- dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
- goto req_irq_failed;
- }
-
- return retrc;
-
- req_irq_failed:
- tasklet_kill(&hostdata->srp_task);
- rc = 0;
- do {
- if (rc)
- msleep(100);
- rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
- reg_crq_failed:
- dma_unmap_single(hostdata->dev,
- queue->msg_token,
- queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
- map_failed:
- free_page((unsigned long)queue->msgs);
- malloc_failed:
- return -1;
-}
-
-/**
- * reenable_crq_queue: - reenables a crq after
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata)
-{
- int rc = 0;
- struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
- /* Re-enable the CRQ */
- do {
- if (rc)
- msleep(100);
- rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
- } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-
- if (rc)
- dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
- return rc;
-}
-
-/**
- * rpavscsi_resume: - resume after suspend
- * @hostdata: ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
-{
- vio_disable_interrupts(to_vio_dev(hostdata->dev));
- tasklet_schedule(&hostdata->srp_task);
- return 0;
-}
-
-struct ibmvscsi_ops rpavscsi_ops = {
- .init_crq_queue = rpavscsi_init_crq_queue,
- .release_crq_queue = rpavscsi_release_crq_queue,
- .reset_crq_queue = rpavscsi_reset_crq_queue,
- .reenable_crq_queue = rpavscsi_reenable_crq_queue,
- .send_crq = rpavscsi_send_crq,
- .resume = rpavscsi_resume,
-};
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 467dc38246f9..ec8abb08cb71 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -192,7 +192,7 @@ static const struct ipr_chip_t ipr_chip[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
-static int ipr_max_bus_speeds [] = {
+static int ipr_max_bus_speeds[] = {
IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
@@ -562,10 +562,27 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->u.add_data = add_data;
}
#else
-#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
+#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
/**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long lock_flags;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ipr_cmd->done(ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
* ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
* @ipr_cmd: ipr command struct
*
@@ -611,34 +628,50 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
-static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+ void (*fast_done) (struct ipr_cmnd *))
{
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
+ ipr_cmd->fast_done = fast_done;
init_timer(&ipr_cmd->timer);
}
/**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
* @ioa_cfg: ioa config struct
*
* Return value:
* pointer to ipr command struct
**/
static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd;
ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
list_del(&ipr_cmd->queue);
- ipr_init_ipr_cmnd(ipr_cmd);
return ipr_cmd;
}
/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
+ return ipr_cmd;
+}
+
+/**
* ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
* @ioa_cfg: ioa config struct
* @clr_ints: interrupts to clear
@@ -1002,7 +1035,7 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
**/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
- switch(proto) {
+ switch (proto) {
case IPR_PROTO_SATA:
case IPR_PROTO_SAS_STP:
res->ata_class = ATA_DEV_ATA;
@@ -3043,7 +3076,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
}
#else
-#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
+#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif
/**
@@ -3055,7 +3088,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
**/
static void ipr_release_dump(struct kref *kref)
{
- struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
+ struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
unsigned long lock_flags = 0;
int i;
@@ -3142,7 +3175,7 @@ restart:
break;
}
}
- } while(did_work);
+ } while (did_work);
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->add_to_ml) {
@@ -3268,7 +3301,7 @@ static ssize_t ipr_show_log_level(struct device *dev,
* number of bytes printed to buffer
**/
static ssize_t ipr_store_log_level(struct device *dev,
- struct device_attribute *attr,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
@@ -3315,7 +3348,7 @@ static ssize_t ipr_store_diagnostics(struct device *dev,
return -EACCES;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3682,7 +3715,7 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
unsigned long lock_flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3746,7 +3779,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
len = snprintf(fname, 99, "%s", buf);
fname[len-1] = '\0';
- if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
+ if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
return -EIO;
}
@@ -4612,7 +4645,7 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
* Return value:
* SUCCESS / FAILED
**/
-static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
+static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
int rc;
@@ -4634,7 +4667,7 @@ static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
return rc;
}
-static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
int rc;
@@ -4701,7 +4734,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
}
LEAVE;
- return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
+ return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
@@ -4725,7 +4758,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -4753,7 +4786,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
* Return value:
* SUCCESS / FAILED
**/
-static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
+static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
@@ -4811,10 +4844,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
res->resetting_device = 0;
LEAVE;
- return (rc ? FAILED : SUCCESS);
+ return rc ? FAILED : SUCCESS;
}
-static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
+static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
@@ -4910,7 +4943,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
* Return value:
* SUCCESS / FAILED
**/
-static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
+static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
@@ -4979,7 +5012,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
res->needs_sync_complete = 1;
LEAVE;
- return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
+ return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
@@ -4989,7 +5022,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
* Return value:
* SUCCESS / FAILED
**/
-static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
+static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
unsigned long flags;
int rc;
@@ -5116,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
u16 cmd_index;
int num_hrrq = 0;
int irq_none = 0;
- struct ipr_cmnd *ipr_cmd;
+ struct ipr_cmnd *ipr_cmd, *temp;
irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -5138,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
+ goto unlock_out;
}
ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@@ -5148,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
- list_del(&ipr_cmd->queue);
- del_timer(&ipr_cmd->timer);
- ipr_cmd->done(ipr_cmd);
+ list_move_tail(&ipr_cmd->queue, &doneq);
rc = IRQ_HANDLED;
@@ -5180,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
int_reg & IPR_PCII_HRRQ_UPDATED) {
ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
+ goto unlock_out;
} else
break;
}
@@ -5189,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(rc == IRQ_NONE))
rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
+unlock_out:
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+
return rc;
}
@@ -5770,21 +5809,28 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ unsigned long lock_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
- scsi_dma_unmap(ipr_cmd->scsi_cmd);
+ scsi_dma_unmap(scsi_cmd);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
- } else
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ } else {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_erp_start(ioa_cfg, ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ }
}
/**
* ipr_queuecommand - Queue a mid-layer request
+ * @shost: scsi host struct
* @scsi_cmd: scsi command struct
- * @done: done function
*
* This function queues a request generated by the mid-layer.
*
@@ -5793,61 +5839,61 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
**/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
- void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scsi_cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
- int rc = 0;
+ unsigned long lock_flags;
+ int rc;
- scsi_cmd->scsi_done = done;
- ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
- res = scsi_cmd->device->hostdata;
+ ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
scsi_cmd->result = (DID_OK << 16);
+ res = scsi_cmd->device->hostdata;
/*
* We are currently blocking all devices due to a host reset
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+ if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
/*
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
- memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_cmd->result = (DID_NO_CONNECT << 16);
- scsi_cmd->scsi_done(scsi_cmd);
- return 0;
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ goto err_nodev;
+ }
+
+ if (ipr_is_gata(res) && res->sata_port) {
+ rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ return rc;
}
- if (ipr_is_gata(res) && res->sata_port)
- return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+ ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
- ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
ioarcb = &ipr_cmd->ioarcb;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
ipr_cmd->scsi_cmd = scsi_cmd;
- ioarcb->res_handle = res->res_handle;
- ipr_cmd->done = ipr_scsi_done;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+ ipr_cmd->done = ipr_scsi_eh_done;
if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
if (scsi_cmd->underflow == 0)
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
- if (res->needs_sync_complete) {
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
- res->needs_sync_complete = 0;
- }
-
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
if (ipr_is_gscsi(res))
ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5859,23 +5905,46 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
- if (likely(rc == 0)) {
- if (ioa_cfg->sis64)
- rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
- else
- rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
- }
+ if (ioa_cfg->sis64)
+ rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+ else
+ rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
- if (unlikely(rc != 0)) {
- list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (!rc)
+ scsi_dma_unmap(scsi_cmd);
return SCSI_MLQUEUE_HOST_BUSY;
}
+ if (unlikely(ioa_cfg->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ scsi_dma_unmap(scsi_cmd);
+ goto err_nodev;
+ }
+
+ ioarcb->res_handle = res->res_handle;
+ if (res->needs_sync_complete) {
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+ res->needs_sync_complete = 0;
+ }
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_send_command(ipr_cmd);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
-}
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ scsi_cmd->result = (DID_NO_CONNECT << 16);
+ scsi_cmd->scsi_done(scsi_cmd);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ return 0;
+}
/**
* ipr_ioctl - IOCTL handler
@@ -5907,7 +5976,7 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
* Return value:
* pointer to buffer with description string
**/
-static const char * ipr_ioa_info(struct Scsi_Host *host)
+static const char *ipr_ioa_info(struct Scsi_Host *host)
{
static char buffer[512];
struct ipr_ioa_cfg *ioa_cfg;
@@ -5965,7 +6034,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6005,7 +6074,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6330,7 +6399,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
int i;
if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
- for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
+ for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
if (__is_processor(ipr_blocked_processors[i]))
return 1;
}
@@ -6608,7 +6677,7 @@ static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
* none
**/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
- struct ipr_mode_pages *mode_pages)
+ struct ipr_mode_pages *mode_pages)
{
int i, entry_length;
struct ipr_dev_bus_entry *bus;
@@ -8022,7 +8091,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
rc = ipr_cmd->job_step(ipr_cmd);
- } while(rc == IPR_RC_JOB_CONTINUE);
+ } while (rc == IPR_RC_JOB_CONTINUE);
}
/**
@@ -8283,7 +8352,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
if (ioa_cfg->ipr_cmd_pool)
- pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
+ pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
kfree(ioa_cfg->ipr_cmnd_list);
kfree(ioa_cfg->ipr_cmnd_list_dma);
@@ -8363,8 +8432,8 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
dma_addr_t dma_addr;
int i;
- ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
- sizeof(struct ipr_cmnd), 512, 0);
+ ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
+ sizeof(struct ipr_cmnd), 512, 0);
if (!ioa_cfg->ipr_cmd_pool)
return -ENOMEM;
@@ -8378,7 +8447,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
- ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
+ ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
if (!ipr_cmd) {
ipr_free_cmd_blks(ioa_cfg);
@@ -8775,8 +8844,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
- ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
- sata_port_info.flags, &ipr_sata_ops);
+ ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
@@ -8964,7 +9032,7 @@ static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
int target, lun;
for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
- for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
+ for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
@@ -9010,7 +9078,7 @@ static void __ipr_remove(struct pci_dev *pdev)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9139,7 +9207,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
unsigned long lock_flags = 0;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
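
The ipr_isr() rework above stops completing commands under host_lock: finished commands are moved to a local doneq while the lock is held, and their callbacks run only after it is dropped, which is what makes the new lock-free fast path safe. A small pthreads model of that harvest-then-complete shape (stand-in types, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct cmd {
	struct cmd *next;
	void (*fast_done)(struct cmd *);
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

static void done_cb(struct cmd *c) { printf("done %p\n", (void *)c); }

static void isr(struct cmd *pending)
{
	struct cmd *doneq = NULL, *c;

	pthread_mutex_lock(&host_lock);
	while ((c = pending)) {              /* harvest under the lock */
		pending = c->next;
		c->next = doneq;
		doneq = c;
	}
	pthread_mutex_unlock(&host_lock);

	while ((c = doneq)) {                /* complete outside the lock */
		doneq = c->next;
		c->fast_done(c);
	}
}

int main(void)
{
	struct cmd b = { NULL, done_cb }, a = { &b, done_cb };

	isr(&a);
	return 0;
}
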
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 153b8bd91d1e..c8a137f83bb1 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.5.3"
-#define IPR_DRIVER_DATE "(March 10, 2012)"
+#define IPR_DRIVER_VERSION "2.5.4"
+#define IPR_DRIVER_DATE "(July 11, 2012)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1525,6 +1525,7 @@ struct ipr_cmnd {
struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
+ void (*fast_done) (struct ipr_cmnd *);
void (*done) (struct ipr_cmnd *);
int (*job_step) (struct ipr_cmnd *);
int (*job_step_failed) (struct ipr_cmnd *);
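
The new fast_done member pairs with ipr_lock_and_done() above: the interrupt path always calls fast_done without the lock, and commands that still need host_lock get a wrapper that takes it and forwards to the old ->done. A compact model of that split, using a pthread mutex in place of host_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

struct cmd {
	void (*done)(struct cmd *);
	void (*fast_done)(struct cmd *);
};

static void legacy_done(struct cmd *c) { printf("done under lock %p\n", (void *)c); }

/* analogue of ipr_lock_and_done(): bridge the lock-free ISR to a locked ->done */
static void lock_and_done(struct cmd *c)
{
	pthread_mutex_lock(&host_lock);
	c->done(c);
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	struct cmd c = { .done = legacy_done, .fast_done = lock_and_done };

	c.fast_done(&c);   /* what the ISR calls after dropping the lock */
	return 0;
}
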
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index b425ed523ccc..b0afa4486158 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1044,7 +1044,7 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
return SCI_SUCCESS;
}
-void isci_host_scan_start(struct Scsi_Host *shost)
+void isci_host_start(struct Scsi_Host *shost)
{
struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
@@ -1079,7 +1079,6 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
{
- task->lldd_task = NULL;
if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
@@ -1087,16 +1086,19 @@ void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_ta
dev_dbg(&ihost->pdev->dev,
"%s: Normal - ireq/task = %p/%p\n",
__func__, ireq, task);
-
+ task->lldd_task = NULL;
task->task_done(task);
} else {
dev_dbg(&ihost->pdev->dev,
"%s: Error - ireq/task = %p/%p\n",
__func__, ireq, task);
-
+ if (sas_protocol_ata(task->task_proto))
+ task->lldd_task = NULL;
sas_task_abort(task);
}
- }
+ } else
+ task->lldd_task = NULL;
+
if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
wake_up_all(&ihost->eventq);
@@ -1120,10 +1122,16 @@ void isci_host_completion_routine(unsigned long data)
sci_controller_completion_handler(ihost);
spin_unlock_irq(&ihost->scic_lock);
- /* the coalesence timeout doubles at each encoding step, so
+ /*
+ * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
+ * issued for hardware issue workaround
+ */
+ active = isci_tci_active(ihost) - SCI_MAX_PORTS;
+
+ /*
+ * the coalescence timeout doubles at each encoding step, so
* update it based on the ilog2 value of the outstanding requests
*/
- active = isci_tci_active(ihost);
writel(SMU_ICC_GEN_VAL(NUMBER, active) |
SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
&ihost->smu_registers->interrupt_coalesce_control);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 9ab58e0540e7..4911310a38f5 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -473,7 +473,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
-void isci_host_scan_start(struct Scsi_Host *);
+void isci_host_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 1286a8a787e2..5ed4e608f200 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -156,7 +156,7 @@ static struct scsi_host_template isci_sht = {
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = isci_host_scan_finished,
- .scan_start = isci_host_scan_start,
+ .scan_start = isci_host_start,
.change_queue_depth = sas_change_queue_depth,
.change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
@@ -721,11 +721,67 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
}
}
+#ifdef CONFIG_PM
+static int isci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct isci_host *ihost;
+ int i;
+
+ for_each_isci_host(i, ihost, pdev) {
+ sas_suspend_ha(&ihost->sas_ha);
+ isci_host_deinit(ihost);
+ }
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int isci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct isci_host *ihost;
+ int rc, i;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ rc = pcim_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "enabling device failure after resume(%d)\n", rc);
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ for_each_isci_host(i, ihost, pdev) {
+ sas_prep_resume_ha(&ihost->sas_ha);
+
+ isci_host_init(ihost);
+ isci_host_start(ihost->sas_ha.core.shost);
+ wait_for_start(ihost);
+
+ sas_resume_ha(&ihost->sas_ha);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
+#endif
+
static struct pci_driver isci_pci_driver = {
.name = DRV_NAME,
.id_table = isci_id_table,
.probe = isci_pci_probe,
.remove = __devexit_p(isci_pci_remove),
+#ifdef CONFIG_PM
+ .driver.pm = &isci_pm_ops,
+#endif
};
static __init int isci_init(void)
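
The isci hunk wires suspend/resume into the PCI driver only when power management is configured. The same guard-everything-together pattern in plain C, with invented names (this is not the kernel's dev_pm_ops machinery, just its shape):

#include <stdio.h>

#define CONFIG_PM 1   /* flip to 0 to compile the PM hooks out */

struct pm_ops { int (*suspend)(void); int (*resume)(void); };
struct driver { const char *name; const struct pm_ops *pm; };

#if CONFIG_PM
static int my_suspend(void) { puts("suspend: quiesce, save state, power down"); return 0; }
static int my_resume(void)  { puts("resume: power up, restore, re-init, restart"); return 0; }
static const struct pm_ops my_pm_ops = { my_suspend, my_resume };
#endif

static struct driver my_driver = {
	.name = "isci-like",
#if CONFIG_PM
	.pm = &my_pm_ops,   /* only wired up when PM is configured */
#endif
};

int main(void)
{
	if (my_driver.pm) {
		my_driver.pm->suspend();
		my_driver.pm->resume();
	}
	return 0;
}
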
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 922086105b4b..1b91ca0dc1e3 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -55,7 +55,7 @@ static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
static struct scsi_host_template iscsi_sw_tcp_sht;
static struct iscsi_transport iscsi_sw_tcp_transport;
-static unsigned int iscsi_max_lun = 512;
+static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
static int iscsi_sw_tcp_dbg;
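
The iscsi_tcp change raises the default max_lun from 512 to ~0. For an unsigned int, ~0 is simply all bits set, i.e. UINT_MAX, which here means "no artificial LUN limit". A two-line check:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_lun = ~0;   /* all bits set */

	printf("max_lun=%u UINT_MAX=%u equal=%d\n",
	       max_lun, UINT_MAX, max_lun == UINT_MAX);
	return 0;
}
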
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index a59fcdc8fd63..bdb81cda8401 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -580,10 +580,7 @@ int sas_ata_init(struct domain_device *found_dev)
struct ata_port *ap;
int rc;
- ata_host_init(&found_dev->sata_dev.ata_host,
- ha->dev,
- sata_port_info.flags,
- &sas_sata_ops);
+ ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
&sata_port_info,
shost);
@@ -700,6 +697,92 @@ void sas_probe_sata(struct asd_sas_port *port)
if (ata_dev_disabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, __func__, -ENODEV);
}
+
+}
+
+static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
+{
+ struct domain_device *dev, *n;
+ bool retry = false;
+
+ list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
+ int rc;
+
+ if (!dev_is_sata(dev))
+ continue;
+
+ sas_ata_wait_eh(dev);
+ rc = dev->sata_dev.pm_result;
+ if (rc == -EAGAIN)
+ retry = true;
+ else if (rc) {
+ /* since we don't have a
+ * ->port_{suspend|resume} routine in our
+ * ata_port ops, and no entanglements with
+ * acpi, suspend should just be a mechanical trip
+ * through eh; catch cases where these
+ * assumptions are invalidated
+ */
+ WARN_ONCE(1, "failed %s %s error: %d\n", func,
+ dev_name(&dev->rphy->dev), rc);
+ }
+
+ /* if libata failed to power manage the device, tear it down */
+ if (ata_dev_disabled(sas_to_ata_dev(dev)))
+ sas_fail_probe(dev, func, -ENODEV);
+ }
+
+ return retry;
+}
+
+void sas_suspend_sata(struct asd_sas_port *port)
+{
+ struct domain_device *dev;
+
+ retry:
+ mutex_lock(&port->ha->disco_mutex);
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ struct sata_device *sata;
+
+ if (!dev_is_sata(dev))
+ continue;
+
+ sata = &dev->sata_dev;
+ if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
+ continue;
+
+ sata->pm_result = -EIO;
+ ata_sas_port_async_suspend(sata->ap, &sata->pm_result);
+ }
+ mutex_unlock(&port->ha->disco_mutex);
+
+ if (sas_ata_flush_pm_eh(port, __func__))
+ goto retry;
+}
+
+void sas_resume_sata(struct asd_sas_port *port)
+{
+ struct domain_device *dev;
+
+ retry:
+ mutex_lock(&port->ha->disco_mutex);
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ struct sata_device *sata;
+
+ if (!dev_is_sata(dev))
+ continue;
+
+ sata = &dev->sata_dev;
+ if (sata->ap->pm_mesg.event == PM_EVENT_ON)
+ continue;
+
+ sata->pm_result = -EIO;
+ ata_sas_port_async_resume(sata->ap, &sata->pm_result);
+ }
+ mutex_unlock(&port->ha->disco_mutex);
+
+ if (sas_ata_flush_pm_eh(port, __func__))
+ goto retry;
}
/**
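
sas_suspend_sata()/sas_resume_sata() above kick off asynchronous libata PM requests under disco_mutex, drop the lock, flush error handling, and repeat the whole pass while any device reports -EAGAIN. A standalone model of that fire-flush-retry loop (fake devices and results; the real code waits in sas_ata_wait_eh()):

#include <stdio.h>
#include <stdbool.h>

#define EAGAIN 11

struct dev { int pm_result; int tries; };

static void async_suspend(struct dev *d)
{
	/* first attempt "races" and must be retried, later attempts succeed */
	d->pm_result = (d->tries++ == 0) ? -EAGAIN : 0;
}

static bool flush_pm(struct dev *devs, int n)
{
	bool retry = false;

	for (int i = 0; i < n; i++)
		if (devs[i].pm_result == -EAGAIN)
			retry = true;
	return retry;
}

int main(void)
{
	struct dev devs[2] = { {0, 0}, {0, 1} };
	int passes = 0;

	do {
		for (int i = 0; i < 2; i++)
			async_suspend(&devs[i]);
		passes++;
	} while (flush_pm(devs, 2));

	printf("suspended after %d pass(es)\n", passes);
	return 0;
}
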
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 3e9dc1a84358..a0c3003e0c7d 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -24,6 +24,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/async.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"
@@ -180,16 +181,18 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
- if (i->dft->lldd_dev_found) {
- res = i->dft->lldd_dev_found(dev);
- if (res) {
- printk("sas: driver on pcidev %s cannot handle "
- "device %llx, error:%d\n",
- dev_name(sas_ha->dev),
- SAS_ADDR(dev->sas_addr), res);
- }
- kref_get(&dev->kref);
+ if (!i->dft->lldd_dev_found)
+ return 0;
+
+ res = i->dft->lldd_dev_found(dev);
+ if (res) {
+ printk("sas: driver on pcidev %s cannot handle "
+ "device %llx, error:%d\n",
+ dev_name(sas_ha->dev),
+ SAS_ADDR(dev->sas_addr), res);
}
+ set_bit(SAS_DEV_FOUND, &dev->state);
+ kref_get(&dev->kref);
return res;
}
@@ -200,7 +203,10 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
- if (i->dft->lldd_dev_gone) {
+ if (!i->dft->lldd_dev_gone)
+ return;
+
+ if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) {
i->dft->lldd_dev_gone(dev);
sas_put_device(dev);
}
@@ -234,6 +240,47 @@ static void sas_probe_devices(struct work_struct *work)
}
}
+static void sas_suspend_devices(struct work_struct *work)
+{
+ struct asd_sas_phy *phy;
+ struct domain_device *dev;
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+ struct Scsi_Host *shost = port->ha->core.shost;
+ struct sas_internal *si = to_sas_internal(shost->transportt);
+
+ clear_bit(DISCE_SUSPEND, &port->disc.pending);
+
+ sas_suspend_sata(port);
+
+ /* lldd is free to forget the domain_device across the
+ * suspension, we force the issue here to keep the reference
+ * counts aligned
+ */
+ list_for_each_entry(dev, &port->dev_list, dev_list_node)
+ sas_notify_lldd_dev_gone(dev);
+
+ /* we are suspending, so we know events are disabled and
+ * phy_list is not being mutated
+ */
+ list_for_each_entry(phy, &port->phy_list, port_phy_el) {
+ if (si->dft->lldd_port_deformed)
+ si->dft->lldd_port_deformed(phy);
+ phy->suspended = 1;
+ port->suspended = 1;
+ }
+}
+
+static void sas_resume_devices(struct work_struct *work)
+{
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+
+ clear_bit(DISCE_RESUME, &port->disc.pending);
+
+ sas_resume_sata(port);
+}
+
/**
* sas_discover_end_dev -- discover an end device (SSP, etc)
* @end: pointer to domain device of interest
@@ -530,6 +577,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
[DISCE_PROBE] = sas_probe_devices,
+ [DISCE_SUSPEND] = sas_suspend_devices,
+ [DISCE_RESUME] = sas_resume_devices,
[DISCE_DESTRUCT] = sas_destruct_devices,
};
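
The discovery handler table above gains DISCE_SUSPEND/DISCE_RESUME slots; because it is a designated-initializer array indexed by the event enum, adding an event is one enum value plus one table slot. The pattern in isolation:

#include <stdio.h>

enum disc_event { DISCE_PROBE, DISCE_SUSPEND, DISCE_RESUME, DISC_NUM_EVENTS };

static void do_probe(void)   { puts("probe");   }
static void do_suspend(void) { puts("suspend"); }
static void do_resume(void)  { puts("resume");  }

/* each event bit indexes straight to its handler */
static void (*const disc_handlers[DISC_NUM_EVENTS])(void) = {
	[DISCE_PROBE]   = do_probe,
	[DISCE_SUSPEND] = do_suspend,
	[DISCE_RESUME]  = do_resume,
};

int main(void)
{
	disc_handlers[DISCE_SUSPEND]();
	disc_handlers[DISCE_RESUME]();
	return 0;
}
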
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index fc460933575c..cd6f99c1ae7e 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -41,6 +41,7 @@ static const char *sas_phye_str[] = {
[1] = "PHYE_OOB_DONE",
[2] = "PHYE_OOB_ERROR",
[3] = "PHYE_SPINUP_HOLD",
+ [4] = "PHYE_RESUME_TIMEOUT",
};
void sas_dprint_porte(int phyid, enum port_event pe)
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 789c4d8bb7a7..aadbd5314c5c 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -134,7 +134,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
&phy->port_events[event].work, ha);
}
-static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
struct sas_ha_struct *ha = phy->ha;
@@ -159,7 +159,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
sas_ha->notify_ha_event = notify_ha_event;
sas_ha->notify_port_event = notify_port_event;
- sas_ha->notify_phy_event = notify_phy_event;
+ sas_ha->notify_phy_event = sas_notify_phy_event;
return 0;
}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 014297c05880..dbc8a793fd86 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -178,7 +178,7 @@ Undo_phys:
return error;
}
-int sas_unregister_ha(struct sas_ha_struct *sas_ha)
+static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
/* Set the state to unregistered to avoid further unchained
* events to be queued, and flush any in-progress drainers
@@ -189,7 +189,11 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
spin_unlock_irq(&sas_ha->lock);
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
+}
+int sas_unregister_ha(struct sas_ha_struct *sas_ha)
+{
+ sas_disable_events(sas_ha);
sas_unregister_ports(sas_ha);
/* flush unregistration work */
@@ -381,6 +385,90 @@ int sas_set_phy_speed(struct sas_phy *phy,
return ret;
}
+void sas_prep_resume_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ set_bit(SAS_HA_REGISTERED, &ha->state);
+
+ /* clear out any stale link events/data from the suspension path */
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_phy *phy = ha->sas_phy[i];
+
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ phy->port_events_pending = 0;
+ phy->phy_events_pending = 0;
+ phy->frame_rcvd_size = 0;
+ }
+}
+EXPORT_SYMBOL(sas_prep_resume_ha);
+
+static int phys_suspended(struct sas_ha_struct *ha)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_phy *phy = ha->sas_phy[i];
+
+ if (phy->suspended)
+ rc++;
+ }
+
+ return rc;
+}
+
+void sas_resume_ha(struct sas_ha_struct *ha)
+{
+ const unsigned long tmo = msecs_to_jiffies(25000);
+ int i;
+
+ /* deform ports on phys that did not resume
+ * at this point we may be racing the phy coming back (as posted
+ * by the lldd). So we post the event and once we are in the
+ * libsas context check that the phy remains suspended before
+ * tearing it down.
+ */
+ i = phys_suspended(ha);
+ if (i)
+ dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
+ i, i > 1 ? "s" : "");
+ wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_phy *phy = ha->sas_phy[i];
+
+ if (phy->suspended) {
+ dev_warn(&phy->phy->dev, "resume timeout\n");
+ sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
+ }
+ }
+
+ /* all phys are back up or timed out, turn on i/o so we can
+ * flush out disks that did not return
+ */
+ scsi_unblock_requests(ha->core.shost);
+ sas_drain_work(ha);
+}
+EXPORT_SYMBOL(sas_resume_ha);
+
+void sas_suspend_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ sas_disable_events(ha);
+ scsi_block_requests(ha->core.shost);
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+
+ sas_discover_event(port, DISCE_SUSPEND);
+ }
+
+ /* flush suspend events while unregistered */
+ mutex_lock(&ha->drain_mutex);
+ __sas_drain_work(ha);
+ mutex_unlock(&ha->drain_mutex);
+}
+EXPORT_SYMBOL(sas_suspend_ha);
+
static void sas_phy_release(struct sas_phy *phy)
{
kfree(phy->hostdata);
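
The three exports above (sas_suspend_ha, sas_prep_resume_ha, sas_resume_ha) are the power-management surface this patch adds for libsas LLDDs. Below is a minimal sketch of how a driver's dev_pm_ops callbacks might chain them, assuming the sas_ha_struct is stored as drvdata; my_host_disable() and my_host_reenable() stand in for driver-specific hardware handling and are illustrative, not real functions:

	static int my_lldd_suspend(struct device *dev)
	{
		struct sas_ha_struct *ha = dev_get_drvdata(dev);

		sas_suspend_ha(ha);	/* block I/O, post DISCE_SUSPEND per port */
		my_host_disable(ha);	/* hypothetical: power down the HBA */
		return 0;
	}

	static int my_lldd_resume(struct device *dev)
	{
		struct sas_ha_struct *ha = dev_get_drvdata(dev);

		sas_prep_resume_ha(ha);	/* re-register, clear stale phy state */
		my_host_reenable(ha);	/* hypothetical: restart the HBA and OOB */
		sas_resume_ha(ha);	/* wait for phys, then unblock I/O */
		return 0;
	}
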
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 507e4cf12e56..1de67964e5a1 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -89,6 +89,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 521422e857ab..cdee446c29e1 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -94,6 +94,25 @@ static void sas_phye_spinup_hold(struct work_struct *work)
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
+static void sas_phye_resume_timeout(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
+
+ /* phew, lldd got the phy back in the nick of time */
+ if (!phy->suspended) {
+ dev_info(&phy->phy->dev, "resume timeout cancelled\n");
+ return;
+ }
+
+ phy->error = 0;
+ phy->suspended = 0;
+ sas_deform_port(phy, 1);
+}
+
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
@@ -105,6 +124,8 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
[PHYE_OOB_DONE] = sas_phye_oob_done,
[PHYE_OOB_ERROR] = sas_phye_oob_error,
[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+ [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
};
static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e884a8c58a0c..1398b714c018 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -39,6 +39,49 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
return true;
}
+static void sas_resume_port(struct asd_sas_phy *phy)
+{
+ struct domain_device *dev;
+ struct asd_sas_port *port = phy->port;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (si->dft->lldd_port_formed)
+ si->dft->lldd_port_formed(phy);
+
+ if (port->suspended)
+ port->suspended = 0;
+ else {
+ /* we only need to handle "link returned" actions once */
+ return;
+ }
+
+ /* if the port came back:
+ * 1/ presume every device came back
+ * 2/ force the next revalidation to check all expander phys
+ */
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ int i, rc;
+
+ rc = sas_notify_lldd_dev_found(dev);
+ if (rc) {
+ sas_unregister_dev(port, dev);
+ continue;
+ }
+
+ if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
+ dev->ex_dev.ex_change_count = -1;
+ for (i = 0; i < dev->ex_dev.num_phys; i++) {
+ struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
+
+ phy->phy_change_count = -1;
+ }
+ }
+ }
+
+ sas_discover_event(port, DISCE_RESUME);
+}
+
/**
* sas_form_port -- add this phy to a port
* @phy: the phy of interest
@@ -58,7 +101,14 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (port) {
if (!phy_is_wideport_member(port, phy))
sas_deform_port(phy, 0);
- else {
+ else if (phy->suspended) {
+ phy->suspended = 0;
+ sas_resume_port(phy);
+
+ /* phy came back, try to cancel the timeout */
+ wake_up(&sas_ha->eh_wait_q);
+ return;
+ } else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
phy->port->num_phys);
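
One subtlety in sas_resume_port() above is worth spelling out: seeding the cached expander change counts with -1 guarantees a mismatch against whatever the hardware reports on the next revalidation pass, so every expander phy gets re-examined. A hedged reduction of the idea (the names and the comparison site are illustrative, not lifted from sas_expander.c):

	/* a cached -1 never equals a count the expander can actually report */
	static bool needs_rescan(int cached_count, int reported_count)
	{
		return cached_count != reported_count;
	}
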
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a65c05a8d488..a184c2443a64 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,6 +73,8 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
+#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */
+
/* Error Attention event polling interval */
#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
@@ -684,6 +686,7 @@ struct lpfc_hba {
#define LPFC_FCF_FOV 1 /* Fast fcf failover */
#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
uint32_t cfg_fcf_failover_policy;
+ uint32_t cfg_fcp_io_sched;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
@@ -695,6 +698,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_wq_count;
uint32_t cfg_fcp_eq_count;
+ uint32_t cfg_fcp_io_channel;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@@ -732,7 +736,7 @@ struct lpfc_hba {
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
- uint32_t fcp_qidx; /* next work queue to post work to */
+ atomic_t fcp_qidx; /* next work queue to post work to */
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index adef5bb2100e..b032562aa0d9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3643,18 +3643,25 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
int val = 0, i;
+ /* fcp_imax is only valid for SLI4 */
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return -EINVAL;
+
/* Sanity check on user data */
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
- /* Value range is [636,651042] */
- if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST)
+ /*
+ * Value range for the HBA is [5000,5000000]
+ * The value for each EQ depends on how many EQs are configured.
+ */
+ if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
- for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY)
+ for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
lpfc_modify_fcp_eq_delay(phba, i);
return strlen(buf);
@@ -3662,13 +3669,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
/*
# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
+# for the HBA.
#
-# Value range is [636,651042]. Default value is 10000.
+# Value range is [5000,5000000]. Default value is 50000.
*/
-static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX;
+static int lpfc_fcp_imax = LPFC_DEF_IMAX;
module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_fcp_imax,
- "Set the maximum number of fast-path FCP interrupts per second");
+ "Set the maximum number of FCP interrupts per second per HBA");
lpfc_param_show(fcp_imax)
/**
@@ -3687,14 +3695,19 @@ lpfc_param_show(fcp_imax)
static int
lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
{
- if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) {
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ phba->cfg_fcp_imax = 0;
+ return 0;
+ }
+
+ if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
phba->cfg_fcp_imax = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3016 fcp_imax: %d out of range, using default\n", val);
- phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX;
+ phba->cfg_fcp_imax = LPFC_DEF_IMAX;
return 0;
}
@@ -3765,6 +3778,16 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
+# lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
+# range is [0,1]. Default value is 0.
+# For [0], FCP commands are issued to Work Queues in a round robin fashion.
+# For [1], FCP commands are issued to a Work Queue associated with the
+# current CPU.
+*/
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
+		"issuing commands [0] - Round Robin, [1] - Current CPU");
+
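A sketch of the two policies the new attribute selects between, assuming a helper of this shape (the patch's actual selection logic lives in the lpfc I/O submission path; lpfc_pick_fcp_wq() is illustrative). Note how round robin leans on the fcp_qidx field this same patch converts to atomic_t:

	static inline uint32_t lpfc_pick_fcp_wq(struct lpfc_hba *phba)
	{
		/* [1] - Current CPU: map the submitting CPU onto an IO channel */
		if (phba->cfg_fcp_io_sched == 1)
			return raw_smp_processor_id() % phba->cfg_fcp_io_channel;

		/* [0] - Round Robin; wraparound of the counter ignored for brevity */
		return atomic_inc_return(&phba->fcp_qidx) % phba->cfg_fcp_io_channel;
	}
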
+/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
@@ -3844,21 +3867,33 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
/*
# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+# This parameter is ignored and will eventually be deprecated
#
-# Value range is [1,31]. Default value is 4.
+# Value range is [1,7]. Default value is 4.
*/
-LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+ LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP work queues, if possible");
/*
-# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
#
-# Value range is [1,7]. Default value is 1.
+# Value range is [1,7]. Default value is 4.
*/
-LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+ LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP event queues, if possible");
/*
+# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
+#
+# Value range is [1,7]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+ LPFC_FCP_IO_CHAN_MAX,
+ "Set the number of FCP I/O channels");
+
+/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
# 1 = HBA resets enabled (default)
@@ -3883,6 +3918,17 @@ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
+# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
+# 0 = disabled (default)
+# 1 = enabled
+# Value range is [0,1]. Default value is 0.
+*/
+unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
+
+module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
+MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
+
+/*
# lpfc_prot_mask: i
# - Bit mask of host protection capabilities used to register with the
# SCSI mid-layer
@@ -3976,6 +4022,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_topology,
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
+ &dev_attr_lpfc_fcp_io_sched,
&dev_attr_lpfc_cr_delay,
&dev_attr_lpfc_cr_count,
&dev_attr_lpfc_multi_ring_support,
@@ -4002,6 +4049,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_wq_count,
&dev_attr_lpfc_fcp_eq_count,
+ &dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
@@ -4964,6 +5012,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
+ lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@@ -4980,6 +5029,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
+ lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_enable_bg_init(phba, lpfc_enable_bg);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 253d9a857346..f7368eb80415 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -195,7 +195,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & 0xff) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -1234,7 +1234,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & 0xff) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -1714,6 +1714,8 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
phba->sli4_hba.lnk_info.lnk_no);
link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+ bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
+ LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
@@ -4796,7 +4798,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
menlo_resp->xri = rsp->ulpContext;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & 0xff) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
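
The hunks above all make the same transformation: the hard-coded "& 0xff" on ulpWord[4] becomes "& IOERR_PARAM_MASK". Expressed as a helper for clarity (a sketch, not from the patch; IOERR_PARAM_MASK is defined in the lpfc headers and its exact value is not shown in this diff, so none is asserted here):

	static inline uint32_t lpfc_ulp_ioerr(uint32_t ulp_word4)
	{
		return ulp_word4 & IOERR_PARAM_MASK;	/* isolate the IOERR code */
	}

	/* usage: switch (lpfc_ulp_ioerr(rsp->un.ulpWord[4])) { ... } */
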
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8a2a514a2553..e470c489de07 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *);
irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
-irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
-irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -391,6 +390,7 @@ extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
extern unsigned int lpfc_prot_mask;
extern unsigned char lpfc_prot_guard;
+extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
@@ -457,6 +457,8 @@ int lpfc_sli4_queue_create(struct lpfc_hba *);
void lpfc_sli4_queue_destroy(struct lpfc_hba *);
void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli_abts_recover_port(struct lpfc_vport *,
+ struct lpfc_nodelist *);
int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
int lpfc_issue_reg_vfi(struct lpfc_vport *);
int lpfc_issue_unreg_vfi(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 93e96b3c9097..7ffabb7e3afa 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -104,7 +104,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+ ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -633,7 +634,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
+			(irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
/* CT command is being retried */
@@ -783,7 +785,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
retry = 1;
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (irsp->un.ulpWord[4]) {
+ switch ((irsp->un.ulpWord[4] &
+ IOERR_PARAM_MASK)) {
+
case IOERR_NO_RESOURCES:
/* We don't increment the retry
* count for this case.
@@ -908,8 +912,10 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
+ (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SLI_DOWN) ||
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED)))
goto out;
retry = cmdiocb->retry;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3217d63ed282..f63f5ff7f274 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -490,9 +490,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
len += snprintf(buf+len, size-len,
"Ring %d: CMD GetInx:%d (Max:%d Next:%d "
"Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
- i, pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx,
- pring->flag, pgpp->rspPutInx, pring->numRiocb);
+ i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb,
+ pring->sli.sli3.next_cmdidx,
+ pring->sli.sli3.local_getidx,
+ pring->flag, pgpp->rspPutInx,
+ pring->sli.sli3.numRiocb);
}
if (phba->sli_rev <= LPFC_SLI_REV3) {
@@ -557,6 +559,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
case NLP_STE_PRLI_ISSUE:
statep = "PRLI ";
break;
+ case NLP_STE_LOGO_ISSUE:
+ statep = "LOGO ";
+ break;
case NLP_STE_UNMAPPED_NODE:
statep = "UNMAP ";
break;
@@ -581,8 +586,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
"WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7));
- len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
- ndlp->nlp_rpi, ndlp->nlp_flag);
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
+ len += snprintf(buf+len, size-len, "RPI:%03d ",
+ ndlp->nlp_rpi);
+ else
+ len += snprintf(buf+len, size-len, "RPI:none ");
+ len += snprintf(buf+len, size-len, "flag:x%08x ",
+ ndlp->nlp_flag);
if (!ndlp->nlp_type)
len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE)
@@ -1999,207 +2009,298 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
- int len = 0, fcp_qidx;
+ int len = 0;
char *pbuffer;
+ int x, cnt;
+ int max_cnt;
+ struct lpfc_queue *qp = NULL;
+
if (!debug->buffer)
debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
if (!debug->buffer)
return 0;
pbuffer = debug->buffer;
+ max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128;
if (*ppos)
return 0;
- /* Get slow-path event queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Slow-path EQ information:\n");
- if (phba->sli4_hba.sp_eq) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tEQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
- phba->sli4_hba.sp_eq->queue_id,
- phba->sli4_hba.sp_eq->entry_count,
- phba->sli4_hba.sp_eq->entry_size,
- phba->sli4_hba.sp_eq->host_index,
- phba->sli4_hba.sp_eq->hba_index);
- }
+ spin_lock_irq(&phba->hbalock);
+
+ /* Fast-path event queue */
+ if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) {
+ cnt = phba->cfg_fcp_io_channel;
+
+ for (x = 0; x < cnt; x++) {
+
+ /* Fast-path EQ */
+ qp = phba->sli4_hba.hba_eq[x];
+ if (!qp)
+ goto proc_cq;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\nHBA EQ info: "
+ "EQ-STAT[max:x%x noE:x%x "
+ "bs:x%x proc:x%llx]\n",
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "EQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->EQ_max_eqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+proc_cq:
+ /* Fast-path FCP CQ */
+ qp = phba->sli4_hba.fcp_cq[x];
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tFCP CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
- /* Get fast-path event queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Fast-path EQ information:\n");
- if (phba->sli4_hba.fp_eq) {
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
- fcp_qidx++) {
- if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+
+ /* Fast-path FCP WQ */
+ qp = phba->sli4_hba.fcp_wq[x];
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tFCP WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ "WQ-STAT[oflow:x%x posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+
+ if (x)
+ continue;
+
+ /* Only EQ 0 has slow path CQs configured */
+
+ /* Slow-path mailbox CQ */
+ qp = phba->sli4_hba.mbx_cq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tMBX CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ-STAT[mbox:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3,
+ (unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tEQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
- phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
- phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
- phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
- phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
- phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
}
- }
- }
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
-
- /* Get mailbox complete queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Slow-path MBX CQ information:\n");
- if (phba->sli4_hba.mbx_cq) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Associated EQID[%02d]:\n",
- phba->sli4_hba.mbx_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tCQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
- phba->sli4_hba.mbx_cq->queue_id,
- phba->sli4_hba.mbx_cq->entry_count,
- phba->sli4_hba.mbx_cq->entry_size,
- phba->sli4_hba.mbx_cq->host_index,
- phba->sli4_hba.mbx_cq->hba_index);
- }
- /* Get slow-path complete queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Slow-path ELS CQ information:\n");
- if (phba->sli4_hba.els_cq) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Associated EQID[%02d]:\n",
- phba->sli4_hba.els_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tCQID [%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
- phba->sli4_hba.els_cq->queue_id,
- phba->sli4_hba.els_cq->entry_count,
- phba->sli4_hba.els_cq->entry_size,
- phba->sli4_hba.els_cq->host_index,
- phba->sli4_hba.els_cq->hba_index);
- }
+ /* Slow-path MBOX MQ */
+ qp = phba->sli4_hba.mbx_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tMBX MQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]:\n",
+ phba->sli4_hba.mbx_wq->assoc_qid);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
- /* Get fast-path complete queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Fast-path FCP CQ information:\n");
- fcp_qidx = 0;
- if (phba->sli4_hba.fcp_cq) {
- do {
- if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+ /* Slow-path ELS response CQ */
+ qp = phba->sli4_hba.els_cq;
+ if (qp) {
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Associated EQID[%02d]:\n",
- phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
+ "\tELS CQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tCQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
- phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
- phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
- phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
- phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
- phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
+ "AssocEQID[%02d]: "
+ "CQ-STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID [%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
}
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- }
- /* Get mailbox queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Slow-path MBX MQ information:\n");
- if (phba->sli4_hba.mbx_wq) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Associated CQID[%02d]:\n",
- phba->sli4_hba.mbx_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tWQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
- phba->sli4_hba.mbx_wq->queue_id,
- phba->sli4_hba.mbx_wq->entry_count,
- phba->sli4_hba.mbx_wq->entry_size,
- phba->sli4_hba.mbx_wq->host_index,
- phba->sli4_hba.mbx_wq->hba_index);
- }
+ /* Slow-path ELS WQ */
+ qp = phba->sli4_hba.els_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tELS WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ " WQ-STAT[oflow:x%x "
+ "posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
- /* Get slow-path work queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Slow-path ELS WQ information:\n");
- if (phba->sli4_hba.els_wq) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Associated CQID[%02d]:\n",
- phba->sli4_hba.els_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tWQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
- phba->sli4_hba.els_wq->queue_id,
- phba->sli4_hba.els_wq->entry_count,
- phba->sli4_hba.els_wq->entry_size,
- phba->sli4_hba.els_wq->host_index,
- phba->sli4_hba.els_wq->hba_index);
- }
+ if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+ /* Slow-path RQ header */
+ qp = phba->sli4_hba.hdr_rq;
- /* Get fast-path work queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Fast-path FCP WQ information:\n");
- if (phba->sli4_hba.fcp_wq) {
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
- fcp_qidx++) {
- if (!phba->sli4_hba.fcp_wq[fcp_qidx])
- continue;
- len += snprintf(pbuffer+len,
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tRQ info: ");
+ len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Associated CQID[%02d]:\n",
- phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
- len += snprintf(pbuffer+len,
+ "AssocCQID[%02d]: "
+ "RQ-STAT[nopost:x%x nobuf:x%x "
+ "trunc:x%x rcv:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tWQID[%02d], "
- "QE-COUNT[%04d], WQE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
- phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
- phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
- phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
- phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
- phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ "\t\tHQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ /* Slow-path RQ data */
+ qp = phba->sli4_hba.dat_rq;
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tDQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
}
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
}
- /* Get receive queue information */
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Slow-path RQ information:\n");
- if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "Associated CQID[%02d]:\n",
- phba->sli4_hba.hdr_rq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tHQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
- phba->sli4_hba.hdr_rq->queue_id,
- phba->sli4_hba.hdr_rq->entry_count,
- phba->sli4_hba.hdr_rq->entry_size,
- phba->sli4_hba.hdr_rq->host_index,
- phba->sli4_hba.hdr_rq->hba_index);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tDQID[%02d], "
- "QE-COUNT[%04d], QE-SIZE[%04d], "
- "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
- phba->sli4_hba.dat_rq->queue_id,
- phba->sli4_hba.dat_rq->entry_count,
- phba->sli4_hba.dat_rq->entry_size,
- phba->sli4_hba.dat_rq->host_index,
- phba->sli4_hba.dat_rq->hba_index);
- }
+ spin_unlock_irq(&phba->hbalock);
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+too_big:
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n");
+ spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
@@ -2408,31 +2509,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) {
case LPFC_IDIAG_EQ:
- /* Slow-path event queue */
- if (phba->sli4_hba.sp_eq &&
- phba->sli4_hba.sp_eq->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.sp_eq, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.sp_eq;
- goto pass_check;
- }
- /* Fast-path event queue */
- if (phba->sli4_hba.fp_eq) {
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
- if (phba->sli4_hba.fp_eq[qidx] &&
- phba->sli4_hba.fp_eq[qidx]->queue_id ==
+ /* HBA event queue */
+ if (phba->sli4_hba.hba_eq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+ qidx++) {
+ if (phba->sli4_hba.hba_eq[qidx] &&
+ phba->sli4_hba.hba_eq[qidx]->queue_id ==
queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.fp_eq[qidx],
+ phba->sli4_hba.hba_eq[qidx],
index, count);
if (rc)
goto error_out;
idiag.ptr_private =
- phba->sli4_hba.fp_eq[qidx];
+ phba->sli4_hba.hba_eq[qidx];
goto pass_check;
}
}
@@ -2479,7 +2570,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx];
goto pass_check;
}
- } while (++qidx < phba->cfg_fcp_eq_count);
+ } while (++qidx < phba->cfg_fcp_io_channel);
}
goto error_out;
break;
@@ -2511,7 +2602,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
/* FCP work queue */
if (phba->sli4_hba.fcp_wq) {
- for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+ qidx++) {
if (!phba->sli4_hba.fcp_wq[qidx])
continue;
if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
@@ -4490,7 +4582,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_mbx_wq(phba);
lpfc_debug_dump_els_wq(phba);
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
lpfc_debug_dump_hdr_rq(phba);
@@ -4501,14 +4593,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_mbx_cq(phba);
lpfc_debug_dump_els_cq(phba);
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
/*
* Dump Event Queues (EQs)
*/
- lpfc_debug_dump_sp_eq(phba);
-
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
- lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
+ lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index afe368fd1b98..8b2b6a3bfc25 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -36,6 +36,9 @@
/* dumpHostSlim output buffer size */
#define LPFC_DUMPHOSTSLIM_SIZE 4096
+/* dumpSLIqinfo output buffer size */
+#define LPFC_DUMPSLIQINFO_SIZE 4096
+
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
@@ -366,7 +369,7 @@ static inline void
lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
{
/* sanity check */
- if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+ if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return;
printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
@@ -388,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
int fcp_cqidx, fcp_cqid;
/* sanity check */
- if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+ if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break;
if (phba->intr_type == MSIX) {
- if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+ if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return;
} else {
if (fcp_cqidx > 0)
@@ -410,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
}
/**
- * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue
+ * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
* @phba: Pointer to HBA context object.
* @fcp_wqidx: Index to a FCP work queue.
*
@@ -418,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
* associated to the FCP work queue specified by the @fcp_wqidx.
**/
static inline void
-lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
+lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
{
struct lpfc_queue *qdesc;
int fcp_eqidx, fcp_eqid;
int fcp_cqidx, fcp_cqid;
/* sanity check */
- if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+ if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break;
if (phba->intr_type == MSIX) {
- if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+ if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return;
} else {
if (fcp_cqidx > 0)
return;
}
- if (phba->cfg_fcp_eq_count == 0) {
- fcp_eqidx = -1;
- fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
- qdesc = phba->sli4_hba.sp_eq;
- } else {
- fcp_eqidx = fcp_cqidx;
- fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
- qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
- }
+ fcp_eqidx = fcp_cqidx;
+ fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
+ qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
"EQ[Idx:%d|Qid:%d]\n",
@@ -543,25 +540,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
}
/**
- * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the slow-path event queue.
- **/
-static inline void
-lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
-{
- printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
- "EQ[Qid:%d]:\n",
- phba->sli4_hba.mbx_wq->queue_id,
- phba->sli4_hba.els_wq->queue_id,
- phba->sli4_hba.mbx_cq->queue_id,
- phba->sli4_hba.els_cq->queue_id,
- phba->sli4_hba.sp_eq->queue_id);
- lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
-}
-
-/**
* lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
* @phba: Pointer to HBA context object.
* @qid: Work queue identifier.
@@ -574,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
{
int wq_idx;
- for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
+ for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
break;
- if (wq_idx < phba->cfg_fcp_wq_count) {
+ if (wq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
return;
@@ -644,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
do {
if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
break;
- } while (++cq_idx < phba->cfg_fcp_eq_count);
+ } while (++cq_idx < phba->cfg_fcp_io_channel);
- if (cq_idx < phba->cfg_fcp_eq_count) {
+ if (cq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
return;
@@ -677,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{
int eq_idx;
- for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
- if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
+ for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
+ if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
break;
}
- if (eq_idx < phba->cfg_fcp_eq_count) {
+ if (eq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
return;
}
- if (phba->sli4_hba.sp_eq->queue_id == qid) {
- printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid);
- lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
- }
}
void lpfc_debug_dump_all_queues(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1d84b63fccad..af49fb03dbb8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -145,6 +145,7 @@ struct lpfc_node_rrq {
#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */
#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */
+#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */
#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful
ACC */
#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from
@@ -201,10 +202,11 @@ struct lpfc_node_rrq {
#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */
#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */
#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */
-#define NLP_STE_UNMAPPED_NODE 0x5 /* PRLI completed from NL_PORT */
-#define NLP_STE_MAPPED_NODE 0x6 /* Identified as a FCP Target */
-#define NLP_STE_NPR_NODE 0x7 /* NPort disappeared */
-#define NLP_STE_MAX_STATE 0x8
+#define NLP_STE_LOGO_ISSUE 0x5 /* LOGO was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE 0x6 /* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE 0x7 /* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE 0x8 /* NPort disappeared */
+#define NLP_STE_MAX_STATE 0x9
#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */
/* For UNUSED_NODE state, the node has just been allocated.
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d54ae1999797..cfe533bc9790 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -962,7 +962,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((phba->fcoe_cvl_eventtag_attn ==
phba->fcoe_cvl_eventtag) &&
(irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED))
goto stop_rr_fcf_flogi;
else
phba->fcoe_cvl_eventtag_attn =
@@ -1108,8 +1109,10 @@ flogifail:
/* Start discovery */
lpfc_disc_start(vport);
} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
- (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
+ (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_SLI_ABORTED) &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_SLI_DOWN))) &&
(phba->link_state != LPFC_CLEAR_LA)) {
/* If FLOGI failed enable link interrupt. */
lpfc_issue_clear_la(phba, vport);
@@ -1476,6 +1479,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
return ndlp;
memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
+ ndlp, ndlp->nlp_DID, new_ndlp);
+
if (!new_ndlp) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
@@ -1527,6 +1534,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* The new_ndlp is replacing ndlp totally, so we need
* to put ndlp on UNUSED list and try to free it.
*/
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3179 PLOGI confirm NEW: %x %x\n",
+ new_ndlp->nlp_DID, keepDID);
/* Fix up the rport accordingly */
rport = ndlp->rport;
@@ -1559,23 +1569,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_drop_node(vport, ndlp);
}
else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3180 PLOGI confirm SWAP: %x %x\n",
+ new_ndlp->nlp_DID, keepDID);
+
lpfc_unreg_rpi(vport, ndlp);
+
/* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
if (phba->sli_rev == LPFC_SLI_REV4)
memcpy(&ndlp->active_rrqs.xri_bitmap,
&rrq.xri_bitmap,
sizeof(ndlp->active_rrqs.xri_bitmap));
+
/* Since we are swapping the ndlp passed in with the new one
- * and the did has already been swapped, copy over the
- * state and names.
+ * and the did has already been swapped, copy over state.
+		 * The new WWNs are already in new_ndlp since that's what
+		 * we looked it up by at the beginning of this routine.
*/
- memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
- sizeof(struct lpfc_name));
- memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
- sizeof(struct lpfc_name));
new_ndlp->nlp_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ /* Since we are switching over to the new_ndlp, the old
+ * ndlp should be put in the NPR state, unless we have
+ * already started re-discovery on it.
+ */
+ if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
/* Fix up the rport accordingly */
rport = ndlp->rport;
if (rport) {
@@ -2367,6 +2388,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_sli *psli;
struct lpfcMboxq *mbox;
+ unsigned long flags;
+ uint32_t skip_recovery = 0;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2381,47 +2404,52 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"LOGO cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
+
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0105 LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, vport->num_disc_nodes);
- /* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(vport))
+
+ if (lpfc_els_chk_latt(vport)) {
+ skip_recovery = 1;
goto out;
+ }
+ /* Check to see if link went down during discovery */
if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
/* NLP_EVT_DEVICE_RM should unregister the RPI
* which should abort all outstanding IOs.
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
+ skip_recovery = 1;
goto out;
}
if (irsp->ulpStatus) {
/* Check for retry */
- if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
+ skip_recovery = 1;
goto out;
+ }
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2756 LOGO failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
+ if (lpfc_error_lost_link(irsp)) {
+ skip_recovery = 1;
goto out;
- else
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_LOGO);
- } else
- /* Good status, call state machine.
- * This will unregister the rpi if needed.
- */
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_LOGO);
+ }
+ }
+
+ /* Call state machine. This will unregister the rpi if needed. */
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+
out:
lpfc_els_free_iocb(phba, cmdiocb);
/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
@@ -2436,9 +2464,30 @@ out:
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
+ skip_recovery = 1;
}
}
}
+
+ /*
+ * If the node is a target, the handling attempts to recover the port.
+ * For any other port type, the rpi is unregistered as an implicit
+ * LOGO.
+ */
+ if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3187 LOGO completes to NPort x%x: Start "
+ "Recovery Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout,
+ vport->num_disc_nodes);
+ lpfc_disc_start(vport);
+ }
return;
}
@@ -2501,10 +2550,27 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue LOGO: did:x%x",
ndlp->nlp_DID, 0, 0);
+ /*
+ * If we are issuing a LOGO, we may try to recover the remote NPort
+ * by issuing a PLOGI later. Even though we issue ELS cmds by the
+ * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
+	 * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI
+	 * for that ELS cmd. To avoid this situation, let's get rid of the
+ * RPI right now, before any ELS cmds are sent.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ spin_unlock_irq(shost->host_lock);
+ if (lpfc_unreg_rpi(vport, ndlp)) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 0;
+ }
+
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
spin_unlock_irq(shost->host_lock);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -2920,7 +2986,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
case ELS_CMD_LOGO:
if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
}
break;
case ELS_CMD_FDISC:
@@ -3007,7 +3073,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
break;
case IOSTAT_LOCAL_REJECT:
- switch ((irsp->un.ulpWord[4] & 0xff)) {
+ switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
if (cmd == ELS_CMD_FLOGI) {
if (PCI_DEVICE_ID_HORNET ==
@@ -3214,7 +3280,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_NO_RESOURCES))) {
/* Don't reset timer for no resources */
/* If discovery / RSCN timer is running, reset it */
@@ -3273,7 +3340,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return 1;
case ELS_CMD_LOGO:
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
return 1;
}
@@ -3533,13 +3600,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_nlp_put(ndlp);
- /* This is the end of the default RPI cleanup logic for this
- * ndlp. If no other discovery threads are using this ndlp.
- * we should free all resources associated with it.
- */
- lpfc_nlp_not_used(ndlp);
+ if (ndlp) {
+ if (NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_nlp_put(ndlp);
+ /* This is the end of the default RPI cleanup logic for
+ * this ndlp. If no other discovery threads are using
+ * this ndlp, free all resources associated with it.
+ */
+ lpfc_nlp_not_used(ndlp);
+ } else {
+ lpfc_drop_node(ndlp->vport, ndlp);
+ }
}
return;
@@ -6803,7 +6874,8 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
+ (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_RCV_BUFFER_WAITING) {
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -7985,3 +8057,47 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
+
+/**
+ * lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted node.
+ *
+ * The driver calls this routine in response to an SLI4 XRI ABORT CQE
+ * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
+ * the driver is required to send a LOGO to the remote node before it
+ * attempts to recover its login to the remote node.
+ */
+void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_hba *phba;
+ unsigned long flags = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba = vport->phba;
+ if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI, "3093 No rport recovery needed. "
+ "rport in state 0x%x\n", ndlp->nlp_state);
+ return;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3094 Start rport recovery on shost id 0x%x "
+ "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+ "flags 0x%x\n",
+ shost->host_no, ndlp->nlp_DID,
+ vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ /*
+ * The rport is not responding. Remove the FCP-2 flag to prevent
+ * an ADISC in the follow-up recovery code.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+}
+
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9b4f92941dce..e9845d2ecf10 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -123,6 +123,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
"rport devlosscb: sid:x%x did:x%x flg:x%x",
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
/* Don't defer this if we are in the process of deleting the vport
* or unloading the driver. The unload will cleanup the node
* appropriately we just need to cleanup the ndlp rport info here.
@@ -142,6 +146,15 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
+ if (ndlp->nlp_type & NLP_FABRIC) {
+
+ /* If the WWPN of the rport and ndlp don't match, ignore it */
+ if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
+ put_device(&rport->dev);
+ return;
+ }
+ }
+
evtp = &ndlp->dev_loss_evt;
if (!list_empty(&evtp->evt_listp))
@@ -202,6 +215,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
"rport devlosstmo:did:x%x type:x%x id:x%x",
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
/* Don't defer this if we are in the process of deleting the vport
* or unloading the driver. The unload will cleanup the node
* appropriately we just need to cleanup the ndlp rport info here.
@@ -3492,7 +3509,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
struct static_vport_info *vport_info;
- int rc = 0, i;
+ int mbx_wait_rc = 0, i;
struct fc_vport_identifiers vport_id;
struct fc_vport *new_fc_vport;
struct Scsi_Host *shost;
@@ -3509,7 +3526,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
" allocate mailbox memory\n");
return;
}
-
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb = &pmb->u.mb;
vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
@@ -3523,24 +3540,31 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
+ /* free dma buffer from previous round */
+ if (pmb->context1) {
+ mp = (struct lpfc_dmabuf *)pmb->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
if (lpfc_dump_static_vport(phba, pmb, offset))
goto out;
pmb->vport = phba->pport;
- rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
+ mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
+ LPFC_MBOX_TMO);
- if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
+ if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0544 lpfc_create_static_vport failed to"
" issue dump mailbox command ret 0x%x "
"status 0x%x\n",
- rc, mb->mbxStatus);
+ mbx_wait_rc, mb->mbxStatus);
goto out;
}
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
- mp = (struct lpfc_dmabuf *) pmb->context2;
+ mp = (struct lpfc_dmabuf *)pmb->context1;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
@@ -3604,9 +3628,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
out:
kfree(vport_info);
- if (rc != MBX_TIMEOUT) {
- if (pmb->context2) {
- mp = (struct lpfc_dmabuf *) pmb->context2;
+ if (mbx_wait_rc != MBX_TIMEOUT) {
+ if (pmb->context1) {
+ mp = (struct lpfc_dmabuf *)pmb->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
@@ -3834,6 +3858,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3183 rport register x%06x, rport %p role x%x\n",
+ ndlp->nlp_DID, rport, rport_ids.roles);
+
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
@@ -3850,6 +3878,10 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
"rport delete: did:x%x flg:x%x type x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3184 rport unregister x%06x, rport %p\n",
+ ndlp->nlp_DID, rport);
+
fc_remote_port_delete(rport);
return;
@@ -3964,6 +3996,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
[NLP_STE_ADISC_ISSUE] = "ADISC",
[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
[NLP_STE_PRLI_ISSUE] = "PRLI",
+ [NLP_STE_LOGO_ISSUE] = "LOGO",
[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
[NLP_STE_MAPPED_NODE] = "MAPPED",
[NLP_STE_NPR_NODE] = "NPR",
@@ -4330,6 +4363,26 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
return 0;
}
+/**
+ * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function will issue an ELS LOGO command after completing
+ * the UNREG_RPI.
+ **/
+void
+lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = (struct lpfc_nodelist *)(pmb->context1);
+ if (!ndlp)
+ return;
+ lpfc_issue_els_logo(vport, ndlp, 0);
+}
+
/*
* Free rpi associated with LPFC_NODELIST entry.
* This routine is called from lpfc_freenode(), when we are removing
@@ -4354,9 +4407,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpi = ndlp->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+ mbox->context1 = ndlp;
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+ } else {
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
mempool_free(mbox, phba->mbox_mem_pool);
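The hunk above shows a recurring mailbox idiom: the submitter stashes its payload in the mailbox context and picks the completion handler up front, so the follow-up ELS LOGO fires only after the port has acknowledged the UNREG_RPI. A condensed sketch of the same idiom, with hypothetical function names and the buffer-free responsibility simplified:

/* Sketch of the deferred-action mailbox pattern (names hypothetical). */
static void example_unreg_done(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->context1;

        if (ndlp)               /* payload carried from submit time */
                lpfc_issue_els_logo(pmb->vport, ndlp, 0);
        /* simplified: the driver's default completion path owns the free */
        mempool_free(pmb, phba->mbox_mem_pool);
}

static int example_unreg_then_logo(struct lpfc_vport *vport,
                                   struct lpfc_nodelist *ndlp, uint16_t rpi)
{
        LPFC_MBOXQ_t *mbox = mempool_alloc(vport->phba->mbox_mem_pool,
                                           GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;
        lpfc_unreg_login(vport->phba, vport->vpi, rpi, mbox);
        mbox->vport = vport;
        mbox->context1 = ndlp;               /* payload for the completion */
        mbox->mbox_cmpl = example_unreg_done;
        if (lpfc_sli_issue_mbox(vport->phba, mbox, MBX_NOWAIT) ==
            MBX_NOT_FINISHED) {
                mempool_free(mbox, vport->phba->mbox_mem_pool);
                return -EIO;
        }
        return 0;
}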
@@ -4499,9 +4559,13 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_disable_node(vport, ndlp);
}
+
+ /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
+
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -4512,6 +4576,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Cleanup REG_LOGIN completions which are not yet processed */
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+ (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
(ndlp != (struct lpfc_nodelist *) mb->context2))
continue;
@@ -4521,6 +4586,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
@@ -4585,7 +4651,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
mbox->vport = vport;
- mbox->context2 = NULL;
+ mbox->context2 = ndlp;
rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -5365,9 +5431,17 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
struct lpfc_nodelist *ndlp;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (filter(ndlp, param))
+ if (filter(ndlp, param)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "3185 FIND node filter %p DID "
+ "Data: x%p x%x x%x\n",
+ filter, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag);
return ndlp;
+ }
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "3186 FIND node filter %p NOT FOUND.\n", filter);
return NULL;
}
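For context on the new 3185/3186 traces: __lpfc_find_node walks vport->fc_nodes applying a caller-supplied node_filter predicate, and the filter pointer itself is what gets logged. A hedged sketch of such a predicate; the DID-match helper is illustrative, not a driver routine:

/* Illustrative filter: match a node by its fabric DID. */
static int example_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
{
        uint32_t did = *(uint32_t *)param;

        return ndlp->nlp_DID == did;
}

/* Usage, with the host lock held as __lpfc_find_node expects:
 *      uint32_t did = 0x0a1b2c;
 *      ndlp = __lpfc_find_node(vport, example_filter_by_did, &did);
 */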
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 41bb1d2fb625..7398ca862e97 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1188,8 +1188,8 @@ typedef struct {
*/
/* Number of rings currently used and available. */
-#define MAX_CONFIGURED_RINGS 3
-#define MAX_RINGS 4
+#define MAX_SLI3_CONFIGURED_RINGS 3
+#define MAX_SLI3_RINGS 4
/* IOCB / Mailbox is owned by FireFly */
#define OWN_CHIP 1
@@ -1251,6 +1251,8 @@ typedef struct {
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
#define PCI_DEVICE_ID_TIGERSHARK 0x0704
#define PCI_DEVICE_ID_TOMCAT 0x0714
+#define PCI_DEVICE_ID_SKYHAWK 0x0724
+#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1458,6 +1460,7 @@ typedef struct { /* FireFly BIU registers */
#define MBX_UNREG_FCFI 0xA2
#define MBX_INIT_VFI 0xA3
#define MBX_INIT_VPI 0xA4
+#define MBX_ACCESS_VDATA 0xA5
#define MBX_AUTH_PORT 0xF8
#define MBX_SECURITY_MGMT 0xF9
@@ -2991,7 +2994,7 @@ typedef struct _PCB {
uint32_t pgpAddrLow;
uint32_t pgpAddrHigh;
- SLI2_RDSC rdsc[MAX_RINGS];
+ SLI2_RDSC rdsc[MAX_SLI3_RINGS];
} PCB_t;
/* NEW_FEATURE */
@@ -3101,18 +3104,18 @@ struct lpfc_pgp {
struct sli2_desc {
uint32_t unused1[16];
- struct lpfc_hgp host[MAX_RINGS];
- struct lpfc_pgp port[MAX_RINGS];
+ struct lpfc_hgp host[MAX_SLI3_RINGS];
+ struct lpfc_pgp port[MAX_SLI3_RINGS];
};
struct sli3_desc {
- struct lpfc_hgp host[MAX_RINGS];
+ struct lpfc_hgp host[MAX_SLI3_RINGS];
uint32_t reserved[8];
uint32_t hbq_put[16];
};
struct sli3_pgp {
- struct lpfc_pgp port[MAX_RINGS];
+ struct lpfc_pgp port[MAX_SLI3_RINGS];
uint32_t hbq_get[16];
};
@@ -3242,6 +3245,7 @@ typedef struct {
#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
#define IOERR_SLI_BRESET 0x102
#define IOERR_SLI_ABORTED 0x103
+#define IOERR_PARAM_MASK 0x1ff
} PARM_ERR;
typedef union {
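IOERR_PARAM_MASK exists because the firmware can pack side information into the upper bits of ulpWord[4]; only the low 9 bits (0x0 through 0x103, matching the WCQE extended-status range) carry the IOERR code. The comparison fixed at the top of this patch follows the pattern below, shown out of its surrounding function as a sketch:

/* Sketch: mask ulpWord[4] before comparing against IOERR_* codes. */
uint32_t word4 = icmd->un.ulpWord[4];

if ((word4 & IOERR_PARAM_MASK) == IOERR_RCV_BUFFER_WAITING) {
        /* low 9 bits identify the error; upper bits are side data */
        phba->fc_stat.NoRcvBuf++;
}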
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 953603a7a43c..834b699cac76 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -187,11 +187,17 @@ struct lpfc_sli_intf {
/* Active interrupt test count */
#define LPFC_ACT_INTR_CNT 4
+/* Algorithms for scheduling FCP commands to WQs */
+#define LPFC_FCP_SCHED_ROUND_ROBIN 0
+#define LPFC_FCP_SCHED_BY_CPU 1
+
/* Delay Multiplier constant */
#define LPFC_DMULT_CONST 651042
-#define LPFC_MIM_IMAX 636
-#define LPFC_FP_DEF_IMAX 10000
-#define LPFC_SP_DEF_IMAX 10000
+
+/* Configuration of Interrupts / sec for entire HBA port */
+#define LPFC_MIN_IMAX 5000
+#define LPFC_MAX_IMAX 5000000
+#define LPFC_DEF_IMAX 50000
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
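The scheduling defines earlier in this hunk select how FCP commands map onto work queues. A hypothetical index computation for each policy; the cfg field name and the use of smp_processor_id() are assumptions, not quoted driver code:

/* Hypothetical WQ-index selection for the two scheduling modes. */
static inline uint32_t
example_scmd_to_wqidx(struct lpfc_hba *phba, uint32_t tag)
{
        /* assumes the caller runs with preemption disabled */
        if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
                /* pin the command to the issuing CPU's channel */
                return smp_processor_id() % phba->cfg_fcp_io_channel;

        /* LPFC_FCP_SCHED_ROUND_ROBIN: spread by a rolling tag */
        return tag % phba->cfg_fcp_io_channel;
}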
@@ -338,7 +344,7 @@ struct lpfc_cqe {
* Define mask value for xri_aborted and wcqe completed CQE extended status.
* Currently, extended status is limited to 9 bits (0x0 -> 0x103).
*/
-#define WCQE_PARAM_MASK 0x1FF;
+#define WCQE_PARAM_MASK 0x1FF
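The WCQE_PARAM_MASK change is a one-character bug fix: an object-like macro must not end in a semicolon, because the semicolon is pasted into every expansion. A minimal illustration of why the old form could not be used mid-expression:

#define WCQE_PARAM_MASK 0x1FF   /* fixed: no trailing ';' */

/* With the old "0x1FF;" definition the test below expanded to
 *      (status & 0x1FF;) == 0
 * which is a syntax error. With the fix it compiles as intended:
 */
static inline int example_extstatus_clear(uint32_t status)
{
        return (status & WCQE_PARAM_MASK) == 0;
}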
/* completion queue entry for wqe completions */
struct lpfc_wcqe_complete {
@@ -880,13 +886,19 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
+#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E
+#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43
#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
+#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
+#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES 0xA1
#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5
#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6
@@ -1382,6 +1394,11 @@ struct lpfc_mbx_set_link_diag_state {
#define lpfc_mbx_set_diag_state_diag_SHIFT 0
#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
#define lpfc_mbx_set_diag_state_diag_WORD word0
+#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT 2
+#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD word0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE 0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE 1
#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
#define lpfc_mbx_set_diag_state_link_num_WORD word0
@@ -2556,7 +2573,7 @@ struct lpfc_mbx_get_sli4_parameters {
};
struct lpfc_rscr_desc_generic {
-#define LPFC_RSRC_DESC_WSIZE 18
+#define LPFC_RSRC_DESC_WSIZE 22
uint32_t desc[LPFC_RSRC_DESC_WSIZE];
};
@@ -2566,6 +2583,9 @@ struct lpfc_rsrc_desc_pcie {
#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
#define lpfc_rsrc_desc_pcie_type_WORD word0
#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
+#define lpfc_rsrc_desc_pcie_length_SHIFT 8
+#define lpfc_rsrc_desc_pcie_length_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_length_WORD word0
uint32_t word1;
#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
@@ -2593,6 +2613,12 @@ struct lpfc_rsrc_desc_fcfcoe {
#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
+#define lpfc_rsrc_desc_fcfcoe_length_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_length_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_length_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD 0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH 72
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH 88
uint32_t word1;
#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
@@ -2651,6 +2677,12 @@ struct lpfc_rsrc_desc_fcfcoe {
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
+/* extended FC/FCoE Resource Descriptor when length = 88 bytes */
+ uint32_t bw_min;
+ uint32_t bw_max;
+ uint32_t iops_min;
+ uint32_t iops_max;
+ uint32_t reserved[4];
};
struct lpfc_func_cfg {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 45c15208be9f..654564fead6c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -480,11 +480,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->link_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ELS ring till hba_state is READY */
- if (psli->ring[psli->extra_ring].cmdringaddr)
+ if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
- if (psli->ring[psli->fcp_ring].cmdringaddr)
+ if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
- if (psli->ring[psli->next_ring].cmdringaddr)
+ if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
/* Post receive buffers for desired rings */
@@ -2059,6 +2059,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
oneConnect = 1;
m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
break;
+ case PCI_DEVICE_ID_SKYHAWK:
+ case PCI_DEVICE_ID_SKYHAWK_VF:
+ oneConnect = 1;
+ m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
+ break;
default:
m = (typeof(m)){"Unknown", "", ""};
break;
@@ -4546,6 +4551,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
}
+ if (!phba->sli.ring)
+ phba->sli.ring = (struct lpfc_sli_ring *)
+ kzalloc(LPFC_SLI3_MAX_RING *
+ sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+ if (!phba->sli.ring)
+ return -ENOMEM;
+
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4690,6 +4702,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
phba->max_vpi = LPFC_MAX_VPI;
+
+ /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
+ phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
+
/* This will be set to correct value after the read_config mbox */
phba->max_vports = 0;
@@ -4705,6 +4721,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sges_per_segment = 2;
/*
+ * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
+ * we will associate a new ring with each FCP fastpath EQ/CQ/WQ tuple.
+ */
+ if (!phba->sli.ring)
+ phba->sli.ring = kzalloc(
+ (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
+ sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+ if (!phba->sli.ring)
+ return -ENOMEM;
+ /*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
* 2 segments are added since the IOCB needs a command and response bde.
@@ -4909,21 +4935,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_remove_rpi_hdrs;
}
- /*
- * The cfg_fcp_eq_count can be zero whenever there is exactly one
- * interrupt vector. This is not an error
- */
- if (phba->cfg_fcp_eq_count) {
- phba->sli4_hba.fcp_eq_hdl =
- kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
- phba->cfg_fcp_eq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_eq_hdl) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2572 Failed allocate memory for "
- "fast-path per-EQ handle array\n");
- rc = -ENOMEM;
- goto out_free_fcf_rr_bmask;
- }
+ phba->sli4_hba.fcp_eq_hdl =
+ kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_eq_hdl) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2572 Failed allocate memory for "
+ "fast-path per-EQ handle array\n");
+ rc = -ENOMEM;
+ goto out_free_fcf_rr_bmask;
}
phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -5550,6 +5570,10 @@ lpfc_hba_free(struct lpfc_hba *phba)
/* Release the driver assigned board number */
idr_remove(&lpfc_hba_index, phba->brd_no);
+ /* Free memory allocated with sli rings */
+ kfree(phba->sli.ring);
+ phba->sli.ring = NULL;
+
kfree(phba);
return;
}
@@ -6275,8 +6299,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
uint32_t shdr_status, shdr_add_status;
struct lpfc_mbx_get_func_cfg *get_func_cfg;
struct lpfc_rsrc_desc_fcfcoe *desc;
+ char *pdesc_0;
uint32_t desc_count;
- int length, i, rc = 0;
+ int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -6388,18 +6413,17 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
length, LPFC_SLI4_MBX_EMBED);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *)
&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc || shdr_status || shdr_add_status) {
+ if (rc2 || shdr_status || shdr_add_status) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3026 Mailbox failed , mbxCmd x%x "
"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &pmb->u.mqe),
bf_get(lpfc_mqe_status, &pmb->u.mqe));
- rc = -EIO;
goto read_cfg_out;
}
@@ -6407,11 +6431,18 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+ pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
+ length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
+ if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
+ length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
+ else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
+ goto read_cfg_out;
+
for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
- desc = (struct lpfc_rsrc_desc_fcfcoe *)
- &get_func_cfg->func_cfg.desc[i];
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
- bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
phba->sli4_hba.iov.pf_number =
bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
phba->sli4_hba.iov.vf_number =
@@ -6425,13 +6456,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
phba->sli4_hba.iov.vf_number);
- else {
+ else
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3028 GET_FUNCTION_CONFIG: failed to find "
"Resrouce Descriptor:x%x\n",
LPFC_RSRC_DESC_TYPE_FCFCOE);
- rc = -EIO;
- }
read_cfg_out:
mempool_free(pmb, phba->mbox_mem_pool);
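The rewritten loop can no longer index desc[] directly: FC/FCoE resource descriptors now come in two sizes (72 bytes for v0, 88 for v1, per the new lpfc_hw4.h defines), so the stride must be read out of descriptor 0 itself. The walk, condensed into a sketch:

/* Sketch: iterate descriptors whose stride is self-describing. */
char *pdesc = (char *)&get_func_cfg->func_cfg.desc[0];
struct lpfc_rsrc_desc_fcfcoe *d = (struct lpfc_rsrc_desc_fcfcoe *)pdesc;
uint32_t stride = bf_get(lpfc_rsrc_desc_fcfcoe_length, d);
int i;

if (stride == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
        stride = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;  /* v0 left it 0 */

for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
        d = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc + stride * i);
        if (bf_get(lpfc_rsrc_desc_fcfcoe_type, d) ==
            LPFC_RSRC_DESC_TYPE_FCFCOE)
                break;  /* found the FC/FCoE function descriptor */
}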
@@ -6512,53 +6541,40 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
- int cfg_fcp_wq_count;
- int cfg_fcp_eq_count;
+ int cfg_fcp_io_channel;
+ uint32_t cpu;
+ uint32_t i = 0;
+
/*
- * Sanity check for confiugred queue parameters against the run-time
+ * Sanity check for configured queue parameters against the run-time
* device parameters
*/
- /* Sanity check on FCP fast-path WQ parameters */
- cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
- if (cfg_fcp_wq_count >
- (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
- cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
- LPFC_SP_WQN_DEF;
- if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2581 Not enough WQs (%d) from "
- "the pci function for supporting "
- "FCP WQs (%d)\n",
- phba->sli4_hba.max_cfg_param.max_wq,
- phba->cfg_fcp_wq_count);
- goto out_error;
- }
+ /* Sanity check on HBA EQ parameters */
+ cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
+
+ /* It doesn't make sense to have more IO channels than CPUs */
+ for_each_online_cpu(cpu) {
+ i++;
+ }
+ if (i < cfg_fcp_io_channel) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2582 Not enough WQs (%d) from the pci "
- "function for supporting the requested "
- "FCP WQs (%d), the actual FCP WQs can "
- "be supported: %d\n",
- phba->sli4_hba.max_cfg_param.max_wq,
- phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
- }
- /* The actual number of FCP work queues adopted */
- phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
-
- /* Sanity check on FCP fast-path EQ parameters */
- cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
- if (cfg_fcp_eq_count >
- (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
- cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
- LPFC_SP_EQN_DEF;
- if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+ "3188 Reducing IO channels to match number of "
+ "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+ cfg_fcp_io_channel = i;
+ }
+
+ if (cfg_fcp_io_channel >
+ phba->sli4_hba.max_cfg_param.max_eq) {
+ cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+ if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 Not enough EQs (%d) from the "
"pci function for supporting FCP "
"EQs (%d)\n",
phba->sli4_hba.max_cfg_param.max_eq,
- phba->cfg_fcp_eq_count);
+ phba->cfg_fcp_io_channel);
goto out_error;
}
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -6567,22 +6583,16 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
"FCP EQs (%d), the actual FCP EQs can "
"be supported: %d\n",
phba->sli4_hba.max_cfg_param.max_eq,
- phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
- }
- /* It does not make sense to have more EQs than WQs */
- if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2593 The FCP EQ count(%d) cannot be greater "
- "than the FCP WQ count(%d), limiting the "
- "FCP EQ count to %d\n", cfg_fcp_eq_count,
- phba->cfg_fcp_wq_count,
- phba->cfg_fcp_wq_count);
- cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
+ phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
}
+
+ /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
+
/* The actual number of FCP event queues adopted */
- phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
- /* The overall number of event queues used */
- phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
+ phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
+ phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
+ phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
/* Get EQ depth from module parameter, fake the default for now */
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -6615,50 +6625,104 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+ int idx;
/*
- * Create Event Queues (EQs)
+ * Create HBA Record arrays.
*/
+ if (!phba->cfg_fcp_io_channel)
+ return -ERANGE;
- /* Create slow path event queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
- phba->sli4_hba.eq_ecount);
- if (!qdesc) {
+ phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+ phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+ phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+ phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+ phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+ phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+ phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.hba_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2576 Failed allocate memory for "
+ "fast-path EQ record array\n");
+ goto out_error;
+ }
+
+ phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0496 Failed allocate slow-path EQ\n");
+ "2577 Failed allocate memory for fast-path "
+ "CQ record array\n");
+ goto out_error;
+ }
+
+ phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2578 Failed allocate memory for fast-path "
+ "WQ record array\n");
goto out_error;
}
- phba->sli4_hba.sp_eq = qdesc;
/*
- * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
- * zero whenever there is exactly one interrupt vector. This is not
- * an error.
+ * Since the first EQ can have multiple CQs associated with it,
+ * this array is used to quickly see if we have a FCP fast-path
+ * CQ match.
*/
- if (phba->cfg_fcp_eq_count) {
- phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_eq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fp_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2576 Failed allocate memory for "
- "fast-path EQ record array\n");
- goto out_free_sp_eq;
- }
+ phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq_map) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2545 Failed allocate memory for fast-path "
+ "CQ map\n");
+ goto out_error;
}
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+
+ /*
+ * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
+ * how many EQs to create.
+ */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+
+ /* Create EQs */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0497 Failed allocate fast-path EQ\n");
- goto out_free_fp_eq;
+ "0497 Failed allocate EQ (%d)\n", idx);
+ goto out_error;
}
- phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+ phba->sli4_hba.hba_eq[idx] = qdesc;
+
+ /* Create Fast Path FCP CQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0499 Failed allocate fast-path FCP "
+ "CQ (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.fcp_cq[idx] = qdesc;
+
+ /* Create Fast Path FCP WQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0503 Failed allocate fast-path FCP "
+ "WQ (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.fcp_wq[idx] = qdesc;
}
+
/*
- * Create Complete Queues (CQs)
+ * Create Slow Path Completion Queues (CQs)
*/
/* Create slow-path Mailbox Command Complete Queue */
@@ -6667,7 +6731,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0500 Failed allocate slow-path mailbox CQ\n");
- goto out_free_fp_eq;
+ goto out_error;
}
phba->sli4_hba.mbx_cq = qdesc;
@@ -6677,59 +6741,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0501 Failed allocate slow-path ELS CQ\n");
- goto out_free_mbx_cq;
+ goto out_error;
}
phba->sli4_hba.els_cq = qdesc;
/*
- * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
- * If there are no FCP EQs then create exactly one FCP CQ.
+ * Create Slow Path Work Queues (WQs)
*/
- if (phba->cfg_fcp_eq_count)
- phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_eq_count),
- GFP_KERNEL);
- else
- phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
- GFP_KERNEL);
- if (!phba->sli4_hba.fcp_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2577 Failed allocate memory for fast-path "
- "CQ record array\n");
- goto out_free_els_cq;
- }
- fcp_cqidx = 0;
- do {
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path FCP "
- "CQ (%d)\n", fcp_cqidx);
- goto out_free_fcp_cq;
- }
- phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
- } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
/* Create Mailbox Command Queue */
- phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
- phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0505 Failed allocate slow-path MQ\n");
- goto out_free_fcp_cq;
+ goto out_error;
}
phba->sli4_hba.mbx_wq = qdesc;
/*
- * Create all the Work Queues (WQs)
+ * Create ELS Work Queues
*/
- phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
- phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
/* Create slow-path ELS Work Queue */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
@@ -6737,36 +6771,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0504 Failed allocate slow-path ELS WQ\n");
- goto out_free_mbx_wq;
+ goto out_error;
}
phba->sli4_hba.els_wq = qdesc;
- /* Create fast-path FCP Work Queue(s) */
- phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_wq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2578 Failed allocate memory for fast-path "
- "WQ record array\n");
- goto out_free_els_wq;
- }
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0503 Failed allocate fast-path FCP "
- "WQ (%d)\n", fcp_wqidx);
- goto out_free_fcp_wq;
- }
- phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
- }
-
/*
* Create Receive Queue (RQ)
*/
- phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
- phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
/* Create Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
@@ -6774,7 +6785,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0506 Failed allocate receive HRQ\n");
- goto out_free_fcp_wq;
+ goto out_error;
}
phba->sli4_hba.hdr_rq = qdesc;
@@ -6784,52 +6795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0507 Failed allocate receive DRQ\n");
- goto out_free_hdr_rq;
+ goto out_error;
}
phba->sli4_hba.dat_rq = qdesc;
return 0;
-out_free_hdr_rq:
- lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
- phba->sli4_hba.hdr_rq = NULL;
-out_free_fcp_wq:
- for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
- phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
- }
- kfree(phba->sli4_hba.fcp_wq);
- phba->sli4_hba.fcp_wq = NULL;
-out_free_els_wq:
- lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
- phba->sli4_hba.els_wq = NULL;
-out_free_mbx_wq:
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
- phba->sli4_hba.mbx_wq = NULL;
-out_free_fcp_cq:
- for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
- phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
- }
- kfree(phba->sli4_hba.fcp_cq);
- phba->sli4_hba.fcp_cq = NULL;
-out_free_els_cq:
- lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
- phba->sli4_hba.els_cq = NULL;
-out_free_mbx_cq:
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
- phba->sli4_hba.mbx_cq = NULL;
-out_free_fp_eq:
- for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
- phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
- }
- kfree(phba->sli4_hba.fp_eq);
- phba->sli4_hba.fp_eq = NULL;
-out_free_sp_eq:
- lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
- phba->sli4_hba.sp_eq = NULL;
out_error:
+ lpfc_sli4_queue_destroy(phba);
return -ENOMEM;
}
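The restructuring above replaces the old split (one slow-path EQ plus optional fast-path EQs) with a uniform model: one EQ/CQ/WQ tuple per IO channel, plus fcp_cq_map so the interrupt path can translate a completion's CQ id back to a channel index without chasing queue pointers. A sketch of that lookup; the function name is assumed:

/* Hypothetical fast lookup: CQ id -> IO channel index via fcp_cq_map. */
static int example_cqid_to_channel(struct lpfc_hba *phba, uint16_t cqid)
{
        int idx;

        for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
                if (phba->sli4_hba.fcp_cq_map[idx] == cqid)
                        return idx;     /* owning EQ/CQ/WQ tuple */
        return -1;                      /* not a fast-path FCP CQ */
}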
@@ -6848,58 +6821,86 @@ out_error:
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
- int fcp_qidx;
+ int idx;
+
+ if (phba->sli4_hba.hba_eq != NULL) {
+ /* Release HBA event queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.hba_eq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.hba_eq[idx]);
+ phba->sli4_hba.hba_eq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.hba_eq);
+ phba->sli4_hba.hba_eq = NULL;
+ }
+
+ if (phba->sli4_hba.fcp_cq != NULL) {
+ /* Release FCP completion queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.fcp_cq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[idx]);
+ phba->sli4_hba.fcp_cq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.fcp_cq);
+ phba->sli4_hba.fcp_cq = NULL;
+ }
+
+ if (phba->sli4_hba.fcp_wq != NULL) {
+ /* Release FCP work queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.fcp_wq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_wq[idx]);
+ phba->sli4_hba.fcp_wq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.fcp_wq);
+ phba->sli4_hba.fcp_wq = NULL;
+ }
+
+ /* Release FCP CQ mapping array */
+ if (phba->sli4_hba.fcp_cq_map != NULL) {
+ kfree(phba->sli4_hba.fcp_cq_map);
+ phba->sli4_hba.fcp_cq_map = NULL;
+ }
/* Release mailbox command work queue */
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
- phba->sli4_hba.mbx_wq = NULL;
+ if (phba->sli4_hba.mbx_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+ phba->sli4_hba.mbx_wq = NULL;
+ }
/* Release ELS work queue */
- lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
- phba->sli4_hba.els_wq = NULL;
-
- /* Release FCP work queue */
- if (phba->sli4_hba.fcp_wq != NULL)
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
- fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
- kfree(phba->sli4_hba.fcp_wq);
- phba->sli4_hba.fcp_wq = NULL;
+ if (phba->sli4_hba.els_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+ phba->sli4_hba.els_wq = NULL;
+ }
/* Release unsolicited receive queue */
- lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
- phba->sli4_hba.hdr_rq = NULL;
- lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
- phba->sli4_hba.dat_rq = NULL;
+ if (phba->sli4_hba.hdr_rq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+ phba->sli4_hba.hdr_rq = NULL;
+ }
+ if (phba->sli4_hba.dat_rq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+ phba->sli4_hba.dat_rq = NULL;
+ }
/* Release ELS complete queue */
- lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
- phba->sli4_hba.els_cq = NULL;
+ if (phba->sli4_hba.els_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+ phba->sli4_hba.els_cq = NULL;
+ }
/* Release mailbox command complete queue */
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
- phba->sli4_hba.mbx_cq = NULL;
-
- /* Release FCP response complete queue */
- fcp_qidx = 0;
- if (phba->sli4_hba.fcp_cq != NULL)
- do
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
- while (++fcp_qidx < phba->cfg_fcp_eq_count);
- kfree(phba->sli4_hba.fcp_cq);
- phba->sli4_hba.fcp_cq = NULL;
-
- /* Release fast-path event queue */
- if (phba->sli4_hba.fp_eq != NULL)
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
- fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
- kfree(phba->sli4_hba.fp_eq);
- phba->sli4_hba.fp_eq = NULL;
-
- /* Release slow-path event queue */
- lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
- phba->sli4_hba.sp_eq = NULL;
+ if (phba->sli4_hba.mbx_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+ phba->sli4_hba.mbx_cq = NULL;
+ }
return;
}
@@ -6919,61 +6920,124 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
int rc = -ENOMEM;
int fcp_eqidx, fcp_cqidx, fcp_wqidx;
int fcp_cq_index = 0;
/*
- * Set up Event Queues (EQs)
+ * Set up HBA Event Queues (EQs)
*/
- /* Set up slow-path event queue */
- if (!phba->sli4_hba.sp_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0520 Slow-path EQ not allocated\n");
- goto out_error;
- }
- rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
- LPFC_SP_DEF_IMAX);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0521 Failed setup of slow-path EQ: "
- "rc = 0x%x\n", rc);
- goto out_error;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2583 Slow-path EQ setup: queue-id=%d\n",
- phba->sli4_hba.sp_eq->queue_id);
-
- /* Set up fast-path event queue */
- if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+ /* Set up HBA event queue */
+ if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3147 Fast-path EQs not allocated\n");
rc = -ENOMEM;
- goto out_destroy_sp_eq;
+ goto out_error;
}
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
- if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+ if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0522 Fast-path EQ (%d) not "
"allocated\n", fcp_eqidx);
rc = -ENOMEM;
- goto out_destroy_fp_eq;
+ goto out_destroy_hba_eq;
}
- rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
- phba->cfg_fcp_imax);
+ rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
+ (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0523 Failed setup of fast-path EQ "
"(%d), rc = 0x%x\n", fcp_eqidx, rc);
- goto out_destroy_fp_eq;
+ goto out_destroy_hba_eq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2584 Fast-path EQ setup: "
+ "2584 HBA EQ setup: "
"queue[%d]-id=%d\n", fcp_eqidx,
- phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+ phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
}
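The lpfc_eq_create call in the loop above divides cfg_fcp_imax by the channel count: the IMAX limits are now expressed as interrupts per second for the whole port, so each EQ receives an equal share. The arithmetic as a sketch; the explicit clamping shown is an assumption, since in the driver the module-parameter layer bounds the value:

/* Sketch: split the port-wide interrupt budget across the EQs. */
uint32_t port_imax = phba->cfg_fcp_imax;        /* LPFC_DEF_IMAX = 50000 */
uint32_t per_eq_imax;

if (port_imax < LPFC_MIN_IMAX)
        port_imax = LPFC_MIN_IMAX;
else if (port_imax > LPFC_MAX_IMAX)
        port_imax = LPFC_MAX_IMAX;

/* each of the cfg_fcp_io_channel EQs moderates to an equal share */
per_eq_imax = port_imax / phba->cfg_fcp_io_channel;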
+ /* Set up fast-path FCP Response Complete Queue */
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3148 Fast-path FCP CQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_hba_eq;
+ }
+
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
+ if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0526 Fast-path FCP CQ (%d) not "
+ "allocated\n", fcp_cqidx);
+ rc = -ENOMEM;
+ goto out_destroy_fcp_cq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0527 Failed setup of fast-path FCP "
+ "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+ goto out_destroy_fcp_cq;
+ }
+
+ /* Setup fcp_cq_map for fast lookup */
+ phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
+ phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2588 FCP CQ setup: cq[%d]-id=%d, "
+ "parent seq[%d]-id=%d\n",
+ fcp_cqidx,
+ phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ fcp_cqidx,
+ phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
+ }
+
+ /* Set up fast-path FCP Work Queue */
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3149 Fast-path FCP WQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_fcp_cq;
+ }
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
+ if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0534 Fast-path FCP WQ (%d) not "
+ "allocated\n", fcp_wqidx);
+ rc = -ENOMEM;
+ goto out_destroy_fcp_wq;
+ }
+ rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+ phba->sli4_hba.fcp_cq[fcp_wqidx],
+ LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0535 Failed setup of fast-path FCP "
+ "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+ goto out_destroy_fcp_wq;
+ }
+
+ /* Bind this WQ to the next FCP ring */
+ pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
+ pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
+ phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2591 FCP WQ setup: wq[%d]-id=%d, "
+ "parent cq[%d]-id=%d\n",
+ fcp_wqidx,
+ phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cq_index,
+ phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
+ }
/*
* Set up Complete Queues (CQs)
*/
@@ -6983,20 +7047,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0528 Mailbox CQ not allocated\n");
rc = -ENOMEM;
- goto out_destroy_fp_eq;
+ goto out_destroy_fcp_wq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
- LPFC_MCQ, LPFC_MBOX);
+ rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
+ phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0529 Failed setup of slow-path mailbox CQ: "
"rc = 0x%x\n", rc);
- goto out_destroy_fp_eq;
+ goto out_destroy_fcp_wq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
phba->sli4_hba.mbx_cq->queue_id,
- phba->sli4_hba.sp_eq->queue_id);
+ phba->sli4_hba.hba_eq[0]->queue_id);
/* Set up slow-path ELS Complete Queue */
if (!phba->sli4_hba.els_cq) {
@@ -7005,8 +7069,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_destroy_mbx_cq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
- LPFC_WCQ, LPFC_ELS);
+ rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
+ phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0531 Failed setup of slow-path ELS CQ: "
@@ -7016,52 +7080,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
phba->sli4_hba.els_cq->queue_id,
- phba->sli4_hba.sp_eq->queue_id);
-
- /* Set up fast-path FCP Response Complete Queue */
- if (!phba->sli4_hba.fcp_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3148 Fast-path FCP CQ array not "
- "allocated\n");
- rc = -ENOMEM;
- goto out_destroy_els_cq;
- }
- fcp_cqidx = 0;
- do {
- if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0526 Fast-path FCP CQ (%d) not "
- "allocated\n", fcp_cqidx);
- rc = -ENOMEM;
- goto out_destroy_fcp_cq;
- }
- if (phba->cfg_fcp_eq_count)
- rc = lpfc_cq_create(phba,
- phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.fp_eq[fcp_cqidx],
- LPFC_WCQ, LPFC_FCP);
- else
- rc = lpfc_cq_create(phba,
- phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.sp_eq,
- LPFC_WCQ, LPFC_FCP);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0527 Failed setup of fast-path FCP "
- "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
- goto out_destroy_fcp_cq;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent %seq[%d]-id=%d\n",
- fcp_cqidx,
- phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
- (phba->cfg_fcp_eq_count) ? "" : "sp_",
- fcp_cqidx,
- (phba->cfg_fcp_eq_count) ?
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
- phba->sli4_hba.sp_eq->queue_id);
- } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
+ phba->sli4_hba.hba_eq[0]->queue_id);
/*
* Set up all the Work Queues (WQs)
@@ -7072,7 +7091,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0538 Slow-path MQ not allocated\n");
rc = -ENOMEM;
- goto out_destroy_fcp_cq;
+ goto out_destroy_els_cq;
}
rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
phba->sli4_hba.mbx_cq, LPFC_MBOX);
@@ -7080,7 +7099,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0539 Failed setup of slow-path MQ: "
"rc = 0x%x\n", rc);
- goto out_destroy_fcp_cq;
+ goto out_destroy_els_cq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
@@ -7102,49 +7121,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"rc = 0x%x\n", rc);
goto out_destroy_mbx_wq;
}
+
+ /* Bind this WQ to the ELS ring */
+ pring = &psli->ring[LPFC_ELS_RING];
+ pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
+ phba->sli4_hba.els_cq->pring = pring;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.els_cq->queue_id);
- /* Set up fast-path FCP Work Queue */
- if (!phba->sli4_hba.fcp_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3149 Fast-path FCP WQ array not "
- "allocated\n");
- rc = -ENOMEM;
- goto out_destroy_els_wq;
- }
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
- if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0534 Fast-path FCP WQ (%d) not "
- "allocated\n", fcp_wqidx);
- rc = -ENOMEM;
- goto out_destroy_fcp_wq;
- }
- rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
- phba->sli4_hba.fcp_cq[fcp_cq_index],
- LPFC_FCP);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0535 Failed setup of fast-path FCP "
- "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
- goto out_destroy_fcp_wq;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2591 FCP WQ setup: wq[%d]-id=%d, "
- "parent cq[%d]-id=%d\n",
- fcp_wqidx,
- phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
- fcp_cq_index,
- phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
- /* Round robin FCP Work Queue's Completion Queue assignment */
- if (phba->cfg_fcp_eq_count)
- fcp_cq_index = ((fcp_cq_index + 1) %
- phba->cfg_fcp_eq_count);
- }
-
/*
* Create Receive Queue (RQ)
*/
@@ -7152,7 +7139,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0540 Receive Queue not allocated\n");
rc = -ENOMEM;
- goto out_destroy_fcp_wq;
+ goto out_destroy_els_wq;
}
lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7175,25 +7162,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_cq->queue_id);
return 0;
-out_destroy_fcp_wq:
- for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
-out_destroy_fcp_cq:
- for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_els_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
-out_destroy_fp_eq:
+out_destroy_fcp_wq:
+ for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_fcp_cq:
+ for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_hba_eq:
for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
- lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
-out_destroy_sp_eq:
- lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
out_error:
return rc;
}
@@ -7222,27 +7207,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset unsolicited receive queue */
lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
/* Unset FCP work queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+ if (phba->sli4_hba.fcp_wq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+ fcp_qidx++)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+ }
/* Unset mailbox command complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
/* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
/* Unset FCP response complete queue */
if (phba->sli4_hba.fcp_cq) {
- fcp_qidx = 0;
- do {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+ fcp_qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
}
/* Unset fast-path event queue */
- if (phba->sli4_hba.fp_eq) {
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ if (phba->sli4_hba.hba_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
fcp_qidx++)
- lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
}
- /* Unset slow-path event queue */
- lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}
/**
@@ -7590,10 +7575,11 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
length = (sizeof(struct lpfc_mbx_nop) -
sizeof(struct lpfc_sli4_cfg_mhdr));
- lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_NOP, length,
+ LPFC_SLI4_MBX_EMBED);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else {
@@ -8133,33 +8119,22 @@ enable_msix_vectors:
"message=%d\n", index,
phba->sli4_hba.msix_entries[index].vector,
phba->sli4_hba.msix_entries[index].entry);
+
/*
* Assign MSI-X vectors to interrupt handlers
*/
- if (vectors > 1)
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
- else
- /* All Interrupts need to be handled by one EQ */
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_intr_handler, IRQF_SHARED,
- LPFC_DRIVER_NAME, phba);
- if (rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0485 MSI-X slow-path request_irq failed "
- "(%d)\n", rc);
- goto msi_fail_out;
- }
+ for (index = 0; index < vectors; index++) {
+ memset(&phba->sli4_hba.handler_name[index], 0, 16);
+ sprintf((char *)&phba->sli4_hba.handler_name[index],
+ LPFC_DRIVER_HANDLER_NAME"%d", index);
- /* The rest of the vector(s) are associated to fast-path handler(s) */
- for (index = 1; index < vectors; index++) {
- phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
- phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
- &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
- LPFC_FP_DRIVER_HANDLER_NAME,
- &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+ &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
+ (char *)&phba->sli4_hba.handler_name[index],
+ &phba->sli4_hba.fcp_eq_hdl[index]);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -8173,12 +8148,9 @@ enable_msix_vectors:
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 1; index--)
- free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
- &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-
- /* free the irq already requested */
- free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+ for (--index; index >= 0; index--)
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
msi_fail_out:
/* Unconfigure MSI-X capability structure */
@@ -8199,11 +8171,9 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
int index;
/* Free up MSI-X multi-message vectors */
- free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
- for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
+ for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
free_irq(phba->sli4_hba.msix_entries[index].vector,
- &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+ &phba->sli4_hba.fcp_eq_hdl[index]);
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
@@ -8249,7 +8219,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
- for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+ for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
}
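With the slow-path/fast-path handler split removed in the MSI-X hunk above, every vector is registered identically: one fcp_eq_hdl per channel and a generated name so /proc/interrupts can tell the channels apart. A trimmed sketch of that loop; the name-size macro and exact name format are assumptions:

/* Sketch: uniform per-channel MSI-X registration. */
for (index = 0; index < vectors; index++) {
        snprintf(phba->sli4_hba.handler_name[index],
                 LPFC_SLI4_HANDLER_NAME_SZ,     /* 16 bytes in this patch */
                 LPFC_DRIVER_HANDLER_NAME "%d", index);
        phba->sli4_hba.fcp_eq_hdl[index].idx = index;
        phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
        rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
                         lpfc_sli4_hba_intr_handler, IRQF_SHARED,
                         phba->sli4_hba.handler_name[index],
                         &phba->sli4_hba.fcp_eq_hdl[index]);
        if (rc)
                goto cfg_fail_out;      /* free vectors already requested */
}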
@@ -8329,10 +8299,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
- for (index = 0; index < phba->cfg_fcp_eq_count;
+ for (index = 0; index < phba->cfg_fcp_io_channel;
index++) {
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+ fcp_eq_in_use, 1);
}
}
}
@@ -9449,7 +9421,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
- int adjusted_fcp_eq_count;
+ int adjusted_fcp_io_channel;
const struct firmware *fw;
uint8_t file_name[16];
@@ -9552,13 +9524,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
}
/* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX)
- adjusted_fcp_eq_count = 0;
+ adjusted_fcp_io_channel = 1;
else if (phba->sli4_hba.msix_vec_nr <
- phba->cfg_fcp_eq_count + 1)
- adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ phba->cfg_fcp_io_channel)
+ adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
else
- adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
- phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
+ adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9694,6 +9666,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
* buffers are released to their corresponding pools here.
*/
lpfc_scsi_free(phba);
+
lpfc_sli4_driver_resource_unset(phba);
/* Unmap adapter Control and Doorbell registers */
@@ -10420,6 +10393,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 20336f09fb3c..efc9cd9def8b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -92,7 +92,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
memset(mp->virt, 0, LPFC_BPL_SIZE);
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
- pmb->context2 = (uint8_t *) mp;
+ pmb->context1 = (uint8_t *)mp;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -950,44 +950,47 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
+ pring->sli.sli3.sizeCiocb =
+ phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
- pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
+ pring->sli.sli3.sizeRiocb =
+ phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
/* A ring MUST have both cmd and rsp entries defined to be
valid */
- if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
+ if ((pring->sli.sli3.numCiocb == 0) ||
+ (pring->sli.sli3.numRiocb == 0)) {
pcbp->rdsc[i].cmdEntries = 0;
pcbp->rdsc[i].rspEntries = 0;
pcbp->rdsc[i].cmdAddrHigh = 0;
pcbp->rdsc[i].rspAddrHigh = 0;
pcbp->rdsc[i].cmdAddrLow = 0;
pcbp->rdsc[i].rspAddrLow = 0;
- pring->cmdringaddr = NULL;
- pring->rspringaddr = NULL;
+ pring->sli.sli3.cmdringaddr = NULL;
+ pring->sli.sli3.rspringaddr = NULL;
continue;
}
/* Command ring setup for ring */
- pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
- pcbp->rdsc[i].cmdEntries = pring->numCiocb;
+ pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
+ pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
(uint8_t *) phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
- iocbCnt += pring->numCiocb;
+ iocbCnt += pring->sli.sli3.numCiocb;
/* Response ring setup for ring */
- pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
+ pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
- pcbp->rdsc[i].rspEntries = pring->numRiocb;
+ pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
(uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
- iocbCnt += pring->numRiocb;
+ iocbCnt += pring->sli.sli3.numRiocb;
}
}
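The sli.sli3.* renames in this hunk come from a union added to struct lpfc_sli_ring, letting SLI3 ring bookkeeping and the SLI4 work-queue back-pointer (pring->sli.sli4.wqp, bound in lpfc_sli4_queue_setup above) share storage. A reconstruction of the shape implied by the accesses in this patch; field types and the full member list are assumptions:

/* Reconstructed from usage in this patch; the real struct has more. */
struct lpfc_sli_ring {
        /* ... mode-independent members (flag, ringno, ...) ... */
        union {
                struct {
                        void *cmdringaddr;      /* SLI2/SLI3 command ring */
                        void *rspringaddr;      /* SLI2/SLI3 response ring */
                        uint32_t numCiocb;      /* command entries */
                        uint32_t numRiocb;      /* response entries */
                        uint32_t sizeCiocb;     /* command IOCB size */
                        uint32_t sizeRiocb;     /* response IOCB size */
                } sli3;
                struct {
                        void *wqp;              /* owning SLI4 work queue */
                } sli4;
        } sli;
};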
@@ -1609,12 +1612,15 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
switch (mbox->mbxCommand) {
case MBX_WRITE_NV: /* 0x03 */
+ case MBX_DUMP_MEMORY: /* 0x17 */
case MBX_UPDATE_CFG: /* 0x1B */
case MBX_DOWN_LOAD: /* 0x1C */
case MBX_DEL_LD_ENTRY: /* 0x1D */
+ case MBX_WRITE_VPARMS: /* 0x32 */
case MBX_LOAD_AREA: /* 0x81 */
case MBX_WRITE_WWN: /* 0x98 */
case MBX_LOAD_EXP_ROM: /* 0x9C */
+ case MBX_ACCESS_VDATA: /* 0xA5 */
return LPFC_MBOX_TMO_FLASH_CMD;
case MBX_SLI4_CONFIG: /* 0x9b */
subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
@@ -1625,11 +1631,17 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
case LPFC_MBOX_OPCODE_WRITE_OBJECT:
case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
case LPFC_MBOX_OPCODE_DELETE_OBJECT:
- case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
+ case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
+ case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
+ case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
+ case LPFC_MBOX_OPCODE_RESET_LICENSES:
+ case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
+ case LPFC_MBOX_OPCODE_GET_VPD_DATA:
+ case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ade763d3930a..cd86069a0ba8 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -194,6 +194,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
pci_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
+ if (phba->rrq_pool)
+ mempool_destroy(phba->rrq_pool);
+ phba->rrq_pool = NULL;
+
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 9133a97f045f..d8fadcb2db73 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1778,6 +1778,117 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
}
static uint32_t
+lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ /*
+ * Take no action. If a LOGO is outstanding, then possibly DevLoss has
+ * timed out and is calling for Device Remove. In this case, the LOGO
+ * must be allowed to complete in state LOGO_ISSUE so that the rpi
+ * and other NLP flags are correctly cleaned up.
+ */
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ /*
+ * Device Recovery events have no meaning for a node with a LOGO
+ * outstanding. The LOGO must complete first; the node is handled
+ * from that point on.
+ */
+ return ndlp->nlp_state;
+}
+
+static uint32_t
lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
@@ -2083,6 +2194,8 @@ lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* For the fabric port just clear the fc flags. */
if (ndlp->nlp_DID == Fabric_DID) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2297,6 +2410,20 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_device_rm_prli_issue, /* DEVICE_RM */
lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
+ lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */
+ lpfc_rcv_prli_logo_issue, /* RCV_PRLI */
+ lpfc_rcv_logo_logo_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */
+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_logo_issue, /* DEVICE_RM */
+ lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */
+
lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
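
The new LOGO_ISSUE handlers above plug into lpfc_disc_action[], a flat array of NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT function pointers indexed by state and event, which is why the hunk adds exactly one handler per event for the new state. A small sketch of that dispatch pattern under invented names (not the driver's types):

#include <stdint.h>
#include <stdio.h>

#define MAX_STATE 2
#define MAX_EVENT 3

typedef uint32_t (*handler_t)(void *ctx);

static uint32_t handle_ok(void *ctx)      { (void)ctx; return 0; }
static uint32_t handle_illegal(void *ctx) { (void)ctx; return (uint32_t)-1; }

/* One fixed-size row of handlers per state, one column per event. */
static handler_t dispatch[MAX_STATE * MAX_EVENT] = {
    handle_ok,      handle_illegal, handle_ok,        /* state 0 */
    handle_illegal, handle_ok,      handle_illegal,   /* state 1 */
};

static uint32_t run_event(uint32_t state, uint32_t event, void *ctx)
{
    return dispatch[state * MAX_EVENT + event](ctx);
}

int main(void)
{
    printf("%u\n", run_event(1, 1, NULL));  /* prints 0 */
    return 0;
}
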
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 925975d2d765..64013f3097ad 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -60,12 +60,6 @@ static char *dif_op_str[] = {
"PROT_WRITE_PASS",
};
-static char *dif_grd_str[] = {
- "NO_GUARD",
- "DIF_CRC",
- "DIX_IP",
-};
-
struct scsi_dif_tuple {
__be16 guard_tag; /* Checksum */
__be16 app_tag; /* Opaque storage */
@@ -3482,9 +3476,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
lp = (uint32_t *)cmnd->sense_buffer;
- if (!scsi_status && (resp_info & RESID_UNDER) &&
- vport->cfg_log_verbose & LOG_FCP_UNDER)
- logit = LOG_FCP_UNDER;
+ /* special handling for under run conditions */
+ if (!scsi_status && (resp_info & RESID_UNDER)) {
+ /* if LOG_FCP is enabled, log underruns via LOG_FCP_ERROR... */
+ if (vport->cfg_log_verbose & LOG_FCP)
+ logit = LOG_FCP_ERROR;
+ /* ...but prefer LOG_FCP_UNDER when the operator enables it */
+ if (vport->cfg_log_verbose & LOG_FCP_UNDER)
+ logit = LOG_FCP_UNDER;
+ }
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"9024 FCP command x%x failed: x%x SNS x%x x%x "
@@ -3552,11 +3552,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
/*
* Check SLI validation that all the transfer was actually done
- * (fcpi_parm should be zero). Apply check only to reads.
+ * (fcpi_parm should be zero).
*/
- } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+ } else if (fcpi_parm) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "9029 FCP Read Check Error Data: "
+ "9029 FCP Data Transfer Check Error: "
"x%x x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
@@ -3615,7 +3615,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd = lpfc_cmd->pCmd;
shost = cmd->device->host;
- lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+ lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
@@ -3660,10 +3660,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
else if (lpfc_cmd->status >= IOSTAT_CNT)
lpfc_cmd->status = IOSTAT_DEFAULT;
- if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
- && !lpfc_cmd->fcp_rsp->rspStatus3
- && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
- && !(phba->cfg_log_verbose & LOG_FCP_UNDER))
+ if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+ !lpfc_cmd->fcp_rsp->rspStatus3 &&
+ (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+ !(vport->cfg_log_verbose & LOG_FCP_UNDER))
logit = 0;
else
logit = LOG_FCP | LOG_FCP_UNDER;
@@ -3829,12 +3829,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ spin_lock_irq(&phba->hbalock);
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock_irq(&phba->hbalock);
+
/*
* If there is a thread waiting for command completion
* wake up the thread.
*/
spin_lock_irqsave(shost->host_lock, flags);
- lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -3868,12 +3871,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
}
+ spin_lock_irq(&phba->hbalock);
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock_irq(&phba->hbalock);
+
/*
* If there is a thread waiting for command completion
* wake up the thread.
*/
spin_lock_irqsave(shost->host_lock, flags);
- lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -3919,6 +3925,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
int datadir = scsi_cmnd->sc_data_direction;
char tag[2];
+ uint8_t *ptr;
+ bool sli4;
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return;
@@ -3930,8 +3938,13 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
- memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
- memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ ptr = &fcp_cmnd->fcpCdb[0];
+ memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+ ptr += scsi_cmnd->cmd_len;
+ memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+ }
+
if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
switch (tag[0]) {
case HEAD_OF_QUEUE_TAG:
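
The CDB handling above switches from zeroing the whole fixed-size buffer and then copying over part of it, to copying the live command bytes first and zeroing only the unused tail. A hedged sketch of the copy-then-pad idiom (illustrative names and length):

#include <stddef.h>
#include <string.h>

#define CDB_LEN 16   /* stand-in for LPFC_FCP_CDB_LEN */

static void fill_cdb(unsigned char *cdb, const unsigned char *src, size_t len)
{
    memcpy(cdb, src, len);                    /* copy the actual command */
    if (len < CDB_LEN)                        /* zero only the unused tail */
        memset(cdb + len, 0, CDB_LEN - len);
}
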
@@ -3947,6 +3960,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else
fcp_cmnd->fcpCntl1 = 0;
+ sli4 = (phba->sli_rev == LPFC_SLI_REV4);
+
/*
* There are three possibilities here - use scatter-gather segment, use
* the single mapping, or neither. Start the lpfc command prep by
@@ -3956,11 +3971,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (scsi_sg_count(scsi_cmnd)) {
if (datadir == DMA_TO_DEVICE) {
iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
- if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (sli4)
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
+ else {
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
- } else
- iocb_cmd->ulpPU = PARM_READ_CHECK;
+ }
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++;
} else {
@@ -3984,7 +4000,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (sli4)
piocbq->iocb.ulpContext =
phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
@@ -4241,9 +4257,8 @@ void lpfc_poll_timeout(unsigned long ptr)
* SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
**/
static int
-lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
- struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -4299,53 +4314,28 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
- cmnd->scsi_done = done;
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
if (vport->phba->cfg_enable_bg) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
- "guard=%s\n", cmnd->cmnd[0],
- dif_op_str[scsi_get_prot_op(cmnd)],
- dif_grd_str[scsi_host_get_guard(shost)]);
- if (cmnd->cmnd[0] == READ_10)
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9035 BLKGRD: READ @ sector %llu, "
- "cnt %u, rpt %d\n",
- (unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request),
- (cmnd->cmnd[1]>>5));
- else if (cmnd->cmnd[0] == WRITE_10)
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9036 BLKGRD: WRITE @ sector %llu, "
- "cnt %u, wpt %d\n",
- (unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request),
- (cmnd->cmnd[1]>>5));
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+ "9033 BLKGRD: rcvd %s cmd:x%x "
+ "sector x%llx cnt %u pt %x\n",
+ dif_op_str[scsi_get_prot_op(cmnd)],
+ cmnd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
}
-
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
if (vport->phba->cfg_enable_bg) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9038 BLKGRD: rcvd unprotected cmd:"
- "%02x op=%s guard=%s\n", cmnd->cmnd[0],
- dif_op_str[scsi_get_prot_op(cmnd)],
- dif_grd_str[scsi_host_get_guard(shost)]);
- if (cmnd->cmnd[0] == READ_10)
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9040 dbg: READ @ sector %llu, "
- "cnt %u, rpt %d\n",
- (unsigned long long)scsi_get_lba(cmnd),
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+ "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
+ "x%x sector x%llx cnt %u pt %x\n",
+ cmnd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
- (cmnd->cmnd[1]>>5));
- else if (cmnd->cmnd[0] == WRITE_10)
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9041 dbg: WRITE @ sector %llu, "
- "cnt %u, wpt %d\n",
- (unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request),
- (cmnd->cmnd[1]>>5));
+ (cmnd->cmnd[1]>>5));
}
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
@@ -4363,11 +4353,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
- spin_unlock(shost->host_lock);
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
- spin_lock(shost->host_lock);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
@@ -4384,11 +4372,10 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return SCSI_MLQUEUE_TARGET_BUSY;
out_fail_command:
- done(cmnd);
+ cmnd->scsi_done(cmnd);
return 0;
}
-static DEF_SCSI_QCMD(lpfc_queuecommand)
/**
* lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
@@ -4414,7 +4401,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
status = fc_block_scsi_eh(cmnd);
- if (status)
+ if (status != 0 && status != SUCCESS)
return status;
spin_lock_irq(&phba->hbalock);
@@ -4428,7 +4415,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
- if (!lpfc_cmd) {
+ if (!lpfc_cmd || !lpfc_cmd->pCmd) {
spin_unlock_irq(&phba->hbalock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
@@ -4521,9 +4508,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
- "for abort to complete: ret %#x, ID %d, "
- "LUN %d\n",
- ret, cmnd->device->id, cmnd->device->lun);
+ "for abortng I/O (xri:x%x) to complete: "
+ "ret %#x, ID %d, LUN %d\n",
+ iocb->sli4_xritag, ret,
+ cmnd->device->id, cmnd->device->lun);
}
goto out;
@@ -4769,7 +4757,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
}
pnode = rdata->pnode;
status = fc_block_scsi_eh(cmnd);
- if (status)
+ if (status != 0 && status != SUCCESS)
return status;
status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4836,7 +4824,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
}
pnode = rdata->pnode;
status = fc_block_scsi_eh(cmnd);
- if (status)
+ if (status != 0 && status != SUCCESS)
return status;
status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4904,7 +4892,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
status = fc_block_scsi_eh(cmnd);
- if (status)
+ if (status != 0 && status != SUCCESS)
return status;
/*
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9cbd20b1328b..6bacbdf156d8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -69,6 +69,8 @@ static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
int);
+static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
+ uint32_t);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -94,6 +96,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
union lpfc_wqe *temp_wqe;
struct lpfc_register doorbell;
uint32_t host_index;
+ uint32_t idx;
/* sanity check on queue memory */
if (unlikely(!q))
@@ -101,8 +104,12 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
temp_wqe = q->qe[q->host_index].wqe;
/* If the host has not yet processed the next entry then we are done */
- if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+ idx = ((q->host_index + 1) % q->entry_count);
+ if (idx == q->hba_index) {
+ q->WQ_overflow++;
return -ENOMEM;
+ }
+ q->WQ_posted++;
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % q->entry_repost))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
@@ -112,7 +119,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* Update the host index before invoking device */
host_index = q->host_index;
- q->host_index = ((q->host_index + 1) % q->entry_count);
+
+ q->host_index = idx;
/* Ring Doorbell */
doorbell.word0 = 0;
@@ -120,7 +128,6 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
- readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
return 0;
}
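
The lpfc_sli4_wq_put() rework above computes the next producer slot once, counts a producer-meets-consumer collision as an overflow, and posts without the trailing readl() flush. As a standalone sketch of the underlying full-ring test, where one slot always stays empty so full and empty states are distinguishable (invented names, not driver code):

#include <stdbool.h>
#include <stdint.h>

struct ring {
    uint32_t host_index;    /* producer: next slot the host will fill */
    uint32_t hba_index;     /* consumer: next slot the HBA will read  */
    uint32_t entry_count;   /* total slots in the ring                */
};

/* Returns true and advances the producer if a slot was available. */
static bool ring_post(struct ring *q)
{
    uint32_t idx = (q->host_index + 1) % q->entry_count;

    if (idx == q->hba_index)
        return false;        /* would overrun the consumer: ring full */
    q->host_index = idx;     /* publish the new producer index */
    return true;
}
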
@@ -194,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
- readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
return 0;
}
@@ -234,6 +240,7 @@ static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
struct lpfc_eqe *eqe;
+ uint32_t idx;
/* sanity check on queue memory */
if (unlikely(!q))
@@ -244,14 +251,34 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
if (!bf_get_le32(lpfc_eqe_valid, eqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
- if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+ idx = ((q->hba_index + 1) % q->entry_count);
+ if (idx == q->host_index)
return NULL;
- q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ q->hba_index = idx;
return eqe;
}
/**
+ * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
+ * @q: The Event Queue whose interrupts are to be disabled
+ *
+ **/
+static inline void
+lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
+{
+ struct lpfc_register doorbell;
+
+ doorbell.word0 = 0;
+ bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+ bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+ (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+ bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+}
+
+/**
* lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
* @q: The Event Queue that the host has completed processing for.
* @arm: Indicates whether the host wants to arm this EQ.
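
The new lpfc_sli4_eq_clr_intr() above packs its doorbell fields into a single 32-bit word with bf_set() and then issues one writel(). A hedged sketch of the same compose-then-write pattern with a hypothetical field layout:

#include <stdint.h>

#define DB_EQ_CI_SHIFT    9      /* illustrative bit positions */
#define DB_QT_SHIFT      10
#define DB_EQID_LO_MASK 0x1ff

static volatile uint32_t mmio_doorbell;   /* stand-in for EQCQDBregaddr */

static void eq_clr_intr(uint16_t queue_id)
{
    uint32_t word0 = 0;

    word0 |= 1u << DB_EQ_CI_SHIFT;         /* clear-interrupt bit */
    word0 |= 1u << DB_QT_SHIFT;            /* queue type: event queue */
    word0 |= queue_id & DB_EQID_LO_MASK;   /* low bits of the EQ id */
    mmio_doorbell = word0;                 /* kernel: writel(word0, reg) */
}
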
@@ -318,6 +345,7 @@ static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
struct lpfc_cqe *cqe;
+ uint32_t idx;
/* sanity check on queue memory */
if (unlikely(!q))
@@ -327,11 +355,12 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
- if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+ idx = ((q->hba_index + 1) % q->entry_count);
+ if (idx == q->host_index)
return NULL;
cqe = q->qe[q->hba_index].cqe;
- q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ q->hba_index = idx;
return cqe;
}
@@ -472,8 +501,8 @@ lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- return (IOCB_t *) (((char *) pring->cmdringaddr) +
- pring->cmdidx * phba->iocb_cmd_size);
+ return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
+ pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
@@ -489,8 +518,8 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- return (IOCB_t *) (((char *) pring->rspringaddr) +
- pring->rspidx * phba->iocb_rsp_size);
+ return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
+ pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
@@ -1320,21 +1349,23 @@ static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
- uint32_t max_cmd_idx = pring->numCiocb;
- if ((pring->next_cmdidx == pring->cmdidx) &&
- (++pring->next_cmdidx >= max_cmd_idx))
- pring->next_cmdidx = 0;
+ uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
+ if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
+ (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
+ pring->sli.sli3.next_cmdidx = 0;
- if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+ if (unlikely(pring->sli.sli3.local_getidx ==
+ pring->sli.sli3.next_cmdidx)) {
- pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
- if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+ if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0315 Ring %d issue: portCmdGet %d "
"is bigger than cmd ring %d\n",
pring->ringno,
- pring->local_getidx, max_cmd_idx);
+ pring->sli.sli3.local_getidx,
+ max_cmd_idx);
phba->link_state = LPFC_HBA_ERROR;
/*
@@ -1349,7 +1380,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return NULL;
}
- if (pring->local_getidx == pring->next_cmdidx)
+ if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
return NULL;
}
@@ -1484,8 +1515,8 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Let the HBA know what IOCB slot will be the next one the
* driver will put a command into.
*/
- pring->cmdidx = pring->next_cmdidx;
- writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
+ pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
+ writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
@@ -2056,6 +2087,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_READ_EVENT_LOG:
case MBX_SECURITY_MGMT:
case MBX_AUTH_PORT:
+ case MBX_ACCESS_VDATA:
ret = mbxCommand;
break;
default:
@@ -2786,7 +2818,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
"0312 Ring %d handler: portRspPut %d "
"is bigger than rsp ring %d\n",
pring->ringno, le32_to_cpu(pgp->rspPutInx),
- pring->numRiocb);
+ pring->sli.sli3.numRiocb);
phba->link_state = LPFC_HBA_ERROR;
@@ -2815,10 +2847,26 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
void lpfc_poll_eratt(unsigned long ptr)
{
struct lpfc_hba *phba;
- uint32_t eratt = 0;
+ uint32_t eratt = 0, rem;
+ uint64_t sli_intr, cnt;
phba = (struct lpfc_hba *)ptr;
+ /* Also track the HBA's interrupt rate (interrupts per second) */
+ sli_intr = phba->sli.slistat.sli_intr;
+
+ if (phba->sli.slistat.sli_prev_intr > sli_intr)
+ cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
+ sli_intr);
+ else
+ cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
+
+ /* 64-bit integer division not supported on 32-bit x86 - use do_div */
+ rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+ phba->sli.slistat.sli_ips = cnt;
+
+ phba->sli.slistat.sli_prev_intr = sli_intr;
+
/* Check chip HA register for error event */
eratt = lpfc_sli_check_eratt(phba);
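
The bookkeeping added to lpfc_poll_eratt() above derives an interrupts-per-second figure from a monotonically increasing 64-bit counter, guarding against wraparound and using do_div() because 64-bit division is not native on 32-bit x86. A minimal userspace sketch of the same computation, with plain C division standing in for do_div() and invented names:

#include <stdint.h>

#define POLL_INTERVAL_S 5   /* stand-in for LPFC_ERATT_POLL_INTERVAL */

static uint64_t prev_intr;

static uint64_t intr_per_sec(uint64_t cur_intr)
{
    uint64_t cnt;

    if (prev_intr > cur_intr)   /* counter wrapped since the last poll */
        cnt = (UINT64_MAX - prev_intr) + cur_intr;
    else
        cnt = cur_intr - prev_intr;

    prev_intr = cur_intr;
    return cnt / POLL_INTERVAL_S;   /* kernel: do_div(cnt, interval) */
}
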
@@ -2873,7 +2921,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
*/
- portRspMax = pring->numRiocb;
+ portRspMax = pring->sli.sli3.numRiocb;
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (unlikely(portRspPut >= portRspMax)) {
lpfc_sli_rsp_pointers_error(phba, pring);
@@ -2887,7 +2935,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
phba->fcp_ring_in_use = 1;
rmb();
- while (pring->rspidx != portRspPut) {
+ while (pring->sli.sli3.rspidx != portRspPut) {
/*
* Fetch an entry off the ring and copy it into a local data
* structure. The copy involves a byte-swap since the
@@ -2896,8 +2944,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
entry = lpfc_resp_iocb(phba, pring);
phba->last_completion_time = jiffies;
- if (++pring->rspidx >= portRspMax)
- pring->rspidx = 0;
+ if (++pring->sli.sli3.rspidx >= portRspMax)
+ pring->sli.sli3.rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
@@ -2915,7 +2963,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
* queuedepths of the SCSI device.
*/
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2998,9 +3047,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
- writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+ writel(pring->sli.sli3.rspidx,
+ &phba->host_gp[pring->ringno].rspGetInx);
- if (pring->rspidx == portRspPut)
+ if (pring->sli.sli3.rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
}
@@ -3015,7 +3065,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
pring->stats.iocb_cmd_empty++;
/* Force update of the local copy of cmdGetInx */
- pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
lpfc_sli_resume_iocb(phba, pring);
if ((pring->lpfc_sli_cmd_available))
@@ -3086,7 +3136,8 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* queuedepths of the SCSI device.
*/
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
@@ -3247,7 +3298,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
*/
- portRspMax = pring->numRiocb;
+ portRspMax = pring->sli.sli3.numRiocb;
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (portRspPut >= portRspMax) {
/*
@@ -3269,7 +3320,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
}
rmb();
- while (pring->rspidx != portRspPut) {
+ while (pring->sli.sli3.rspidx != portRspPut) {
/*
* Build a completion list and call the appropriate handler.
* The process is to get the next available response iocb, get
@@ -3297,8 +3348,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
phba->iocb_rsp_size);
irsp = &rspiocbp->iocb;
- if (++pring->rspidx >= portRspMax)
- pring->rspidx = 0;
+ if (++pring->sli.sli3.rspidx >= portRspMax)
+ pring->sli.sli3.rspidx = 0;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_debugfs_slow_ring_trc(phba,
@@ -3308,7 +3359,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
*(((uint32_t *) irsp) + 7));
}
- writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+ writel(pring->sli.sli3.rspidx,
+ &phba->host_gp[pring->ringno].rspGetInx);
spin_unlock_irqrestore(&phba->hbalock, iflag);
/* Handle the response IOCB */
@@ -3320,10 +3372,10 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
* the pgp->rspPutInx in the MAILBOX_t and fetch the new port
* response put pointer.
*/
- if (pring->rspidx == portRspPut) {
+ if (pring->sli.sli3.rspidx == portRspPut) {
portRspPut = le32_to_cpu(pgp->rspPutInx);
}
- } /* while (pring->rspidx != portRspPut) */
+ } /* while (pring->sli.sli3.rspidx != portRspPut) */
if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
/* At least one response entry has been freed */
@@ -3338,7 +3390,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
pring->stats.iocb_cmd_empty++;
/* Force update of the local copy of cmdGetInx */
- pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
lpfc_sli_resume_iocb(phba, pring);
if ((pring->lpfc_sli_cmd_available))
@@ -3859,10 +3911,10 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring->flag = 0;
- pring->rspidx = 0;
- pring->next_cmdidx = 0;
- pring->local_getidx = 0;
- pring->cmdidx = 0;
+ pring->sli.sli3.rspidx = 0;
+ pring->sli.sli3.next_cmdidx = 0;
+ pring->sli.sli3.local_getidx = 0;
+ pring->sli.sli3.cmdidx = 0;
pring->missbufcnt = 0;
}
@@ -4893,16 +4945,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
fcp_eqidx = 0;
if (phba->sli4_hba.fcp_cq) {
- do
+ do {
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
- while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+ } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
}
- lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
- if (phba->sli4_hba.fp_eq) {
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+ if (phba->sli4_hba.hba_eq) {
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx++)
- lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+ lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
LPFC_QUEUE_REARM);
}
}
@@ -7784,14 +7835,18 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
*
* Return: index into SLI4 fast-path FCP queue index.
**/
-static uint32_t
+static inline uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
- ++phba->fcp_qidx;
- if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
- phba->fcp_qidx = 0;
+ int i;
+
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
+ i = smp_processor_id();
+ else
+ i = atomic_add_return(1, &phba->fcp_qidx);
- return phba->fcp_qidx;
+ i = (i % phba->cfg_fcp_io_channel);
+ return i;
}
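
The reworked lpfc_sli4_scmd_to_wqidx_distr() above supports two I/O-channel selection policies: pin the command to the submitting CPU's queue, or round-robin with an atomic counter, reducing either result modulo the channel count. A hedged sketch using C11 atomics in place of the kernel's atomic_add_return() (names invented):

#include <stdatomic.h>
#include <stdint.h>

#define NUM_CHANNELS 4   /* stand-in for cfg_fcp_io_channel */

static atomic_uint next_qidx;

/* by_cpu: use the caller's CPU id; otherwise round-robin. */
static uint32_t pick_channel(int by_cpu, uint32_t cpu_id)
{
    uint32_t i;

    if (by_cpu)
        i = cpu_id;   /* kernel: smp_processor_id() */
    else
        i = atomic_fetch_add(&next_qidx, 1) + 1;   /* like atomic_add_return */

    return i % NUM_CHANNELS;
}
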
/**
@@ -8311,16 +8366,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- /*
- * For FCP command IOCB, get a new WQ index to distribute
- * WQE across the WQsr. On the other hand, for abort IOCB,
- * it carries the same WQ index to the original command
- * IOCB.
- */
- if (piocb->iocb_flag & LPFC_IO_FCP)
- piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
return IOCB_ERROR;
@@ -8401,13 +8446,68 @@ int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_queue *fpeq;
+ struct lpfc_eqe *eqe;
unsigned long iflags;
- int rc;
+ int rc, idx;
- spin_lock_irqsave(&phba->hbalock, iflags);
- rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (piocb->iocb_flag & LPFC_IO_FCP) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+
+ pring = &phba->sli.ring[ring_number];
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
+ flag);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ if (lpfc_fcp_look_ahead) {
+ fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+
+ if (atomic_dec_and_test(&fcp_eq_hdl->
+ fcp_eq_in_use)) {
+ /* Get associated EQ with this index */
+ fpeq = phba->sli4_hba.hba_eq[idx];
+
+ /* Turn off interrupts from this EQ */
+ lpfc_sli4_eq_clr_intr(fpeq);
+
+ /*
+ * Process all the events on FCP EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba,
+ eqe, idx);
+ fpeq->EQ_processed++;
+ }
+
+ /* Always clear and re-arm the EQ */
+ lpfc_sli4_eq_release(fpeq,
+ LPFC_QUEUE_REARM);
+ }
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ }
+ } else {
+ pring = &phba->sli.ring[ring_number];
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
+ flag);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ }
+ } else {
+ /* For now, SLI2/3 will still use hbalock */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
return rc;
}
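
The lpfc_sli_issue_iocb() split above moves SLI-4 submissions onto a per-ring lock so independent I/O channels stop serializing on the single hbalock, which SLI-2/3 keeps for now. A rough userspace sketch of that lock-granularity choice, with pthread mutexes standing in for spinlocks and all names invented:

#include <pthread.h>

#define NUM_RINGS 4

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

struct ring {
    pthread_mutex_t ring_lock;   /* one lock per ring */
    unsigned long posted;
};

static struct ring rings[NUM_RINGS];

static void rings_init(void)
{
    for (int i = 0; i < NUM_RINGS; i++)
        pthread_mutex_init(&rings[i].ring_lock, NULL);
}

/* Per-ring locking lets submissions to different rings run in parallel. */
static void issue(struct ring *r, int per_ring_locking)
{
    pthread_mutex_t *lock =
        per_ring_locking ? &r->ring_lock : &global_lock;

    pthread_mutex_lock(lock);
    r->posted++;   /* stand-in for __lpfc_sli_issue_iocb() */
    pthread_mutex_unlock(lock);
}
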
@@ -8434,18 +8534,18 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
/* Take some away from the FCP ring */
pring = &psli->ring[psli->fcp_ring];
- pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
- pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
- pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
- pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
/* and give them to the extra ring */
pring = &psli->ring[psli->extra_ring];
- pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
- pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
- pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
- pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
/* Setup default profile for this ring */
pring->iotag_max = 4096;
@@ -8457,56 +8557,6 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
-/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
- * @vport: pointer to virtual port object.
- * @ndlp: nodelist pointer for the impacted rport.
- *
- * The driver calls this routine in response to a XRI ABORT CQE
- * event from the port. In this event, the driver is required to
- * recover its login to the rport even though its login may be valid
- * from the driver's perspective. The failed ABTS notice from the
- * port indicates the rport is not responding.
- */
-static void
-lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
- struct lpfc_nodelist *ndlp)
-{
- struct Scsi_Host *shost;
- struct lpfc_hba *phba;
- unsigned long flags = 0;
-
- shost = lpfc_shost_from_vport(vport);
- phba = vport->phba;
- if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
- lpfc_printf_log(phba, KERN_INFO,
- LOG_SLI, "3093 No rport recovery needed. "
- "rport in state 0x%x\n",
- ndlp->nlp_state);
- return;
- }
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "3094 Start rport recovery on shost id 0x%x "
- "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
- "flags 0x%x\n",
- shost->host_no, ndlp->nlp_DID,
- vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
- ndlp->nlp_flag);
- /*
- * The rport is not responding. Don't attempt ADISC recovery.
- * Remove the FCP-2 flag to force a PLOGI.
- */
- spin_lock_irqsave(shost->host_lock, flags);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- spin_unlock_irqrestore(shost->host_lock, flags);
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irqsave(shost->host_lock, flags);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irqrestore(shost->host_lock, flags);
- lpfc_disc_start(vport);
-}
-
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to iocb object.
@@ -8594,7 +8644,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
* LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
* LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
*/
- ext_status = axri->parameter & WCQE_PARAM_MASK;
+ ext_status = axri->parameter & IOERR_PARAM_MASK;
if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
lpfc_sli_abts_recover_port(vport, ndlp);
@@ -8692,7 +8742,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- psli->num_rings = MAX_CONFIGURED_RINGS;
+ psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ psli->num_rings += phba->cfg_fcp_io_channel;
psli->sli_flag = 0;
psli->fcp_ring = LPFC_FCP_RING;
psli->next_ring = LPFC_FCP_NEXT_RING;
@@ -8707,16 +8759,20 @@ lpfc_sli_setup(struct lpfc_hba *phba)
switch (i) {
case LPFC_FCP_RING: /* ring 0 - FCP */
/* numCiocb and numRiocb are used in config_port */
- pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
- pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
- pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
- pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
- pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
- pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
- pring->sizeCiocb = (phba->sli_rev == 3) ?
+ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+ pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+ pring->sli.sli3.numCiocb +=
+ SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb +=
+ SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb +=
+ SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb +=
+ SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+ pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
- pring->sizeRiocb = (phba->sli_rev == 3) ?
+ pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->iotag_ctr = 0;
@@ -8727,12 +8783,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
break;
case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
/* numCiocb and numRiocb are used in config_port */
- pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
- pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
- pring->sizeCiocb = (phba->sli_rev == 3) ?
+ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+ pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+ pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
- pring->sizeRiocb = (phba->sli_rev == 3) ?
+ pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->iotag_max = phba->cfg_hba_queue_depth;
@@ -8740,12 +8796,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
break;
case LPFC_ELS_RING: /* ring 2 - ELS / CT */
/* numCiocb and numRiocb are used in config_port */
- pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
- pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
- pring->sizeCiocb = (phba->sli_rev == 3) ?
+ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+ pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+ pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
- pring->sizeRiocb = (phba->sli_rev == 3) ?
+ pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->fast_iotag = 0;
@@ -8786,8 +8842,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
lpfc_sli4_ct_abort_unsol_event;
break;
}
- totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
- (pring->numRiocb * pring->sizeRiocb);
+ totiocbsize += (pring->sli.sli3.numCiocb *
+ pring->sli.sli3.sizeCiocb) +
+ (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
}
if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
/* Too many cmd / rsp ring entries in SLI2 SLIM */
@@ -8828,14 +8885,15 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring->ringno = i;
- pring->next_cmdidx = 0;
- pring->local_getidx = 0;
- pring->cmdidx = 0;
+ pring->sli.sli3.next_cmdidx = 0;
+ pring->sli.sli3.local_getidx = 0;
+ pring->sli.sli3.cmdidx = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
INIT_LIST_HEAD(&pring->iocb_continue_saveq);
INIT_LIST_HEAD(&pring->postbufq);
+ spin_lock_init(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
return 1;
@@ -9334,6 +9392,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
int retval;
+ unsigned long iflags;
/*
* There are certain command types we don't want to abort. And we
@@ -9386,7 +9445,17 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->un.acxri.abortIoTag,
iabt->un.acxri.abortContextTag,
abtsiocbp->iotag);
- retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* Note: both hbalock and ring_lock need to be set here */
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbp, 0);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ } else {
+ retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbp, 0);
+ }
if (retval)
__lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -10947,12 +11016,12 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
unsigned long iflags;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11154,6 +11223,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
/**
* lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
* @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
* @wcqe: Pointer to work-queue completion queue entry.
*
* This routine handles an ELS work-queue completion event.
@@ -11161,12 +11231,12 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
-lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+ struct lpfc_sli_ring *pring = cq->pring;
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
@@ -11311,14 +11381,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
+ hrq->RQ_buf_trunc++;
case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
+ hrq->RQ_rcv_buf++;
memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
/* save off the frame for the worker thread to process */
list_add_tail(&dma_buf->cq_event.list,
@@ -11330,6 +11403,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
break;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
case FC_STATUS_INSUFF_BUF_FRM_DISC:
+ hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
@@ -11367,7 +11441,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case CQE_CODE_COMPL_WQE:
/* Process the WQ/RQ complete event */
phba->last_completion_time = jiffies;
- workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+ workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
(struct lpfc_wcqe_complete *)&cqevt);
break;
case CQE_CODE_RELEASE_WQE:
@@ -11411,31 +11485,18 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
*
**/
static void
-lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+ struct lpfc_queue *speq)
{
- struct lpfc_queue *cq = NULL, *childq, *speq;
+ struct lpfc_queue *cq = NULL, *childq;
struct lpfc_cqe *cqe;
bool workposted = false;
int ecount = 0;
uint16_t cqid;
- if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0359 Not a valid slow-path completion "
- "event: majorcode=x%x, minorcode=x%x\n",
- bf_get_le32(lpfc_eqe_major_code, eqe),
- bf_get_le32(lpfc_eqe_minor_code, eqe));
- return;
- }
-
/* Get the reference to the corresponding CQ */
cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
- /* Search for completion queue pointer matching this cqid */
- speq = phba->sli4_hba.sp_eq;
- /* sanity check on queue memory */
- if (unlikely(!speq))
- return;
list_for_each_entry(childq, &speq->child_list, list) {
if (childq->queue_id == cqid) {
cq = childq;
@@ -11457,6 +11518,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
if (!(++ecount % cq->entry_repost))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ cq->CQ_mbox++;
}
break;
case LPFC_WCQ:
@@ -11470,6 +11532,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
if (!(++ecount % cq->entry_repost))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11494,34 +11560,33 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/**
* lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
- * @eqe: Pointer to fast-path completion queue entry.
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
*
* This routine processes a fast-path work queue completion entry from the
* fast-path event queue for FCP command response completion.
**/
static void
-lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_wcqe_complete *wcqe)
{
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+ struct lpfc_sli_ring *pring = cq->pring;
struct lpfc_iocbq *cmdiocbq;
struct lpfc_iocbq irspiocbq;
unsigned long iflags;
- spin_lock_irqsave(&phba->hbalock, iflags);
- pring->stats.iocb_event++;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
-
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
/* If resource errors reported from HBA, reduce queue
* depth of the SCSI device.
*/
- if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
- IOSTAT_LOCAL_REJECT) &&
- (wcqe->parameter == IOERR_NO_RESOURCES)) {
+ if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
+ IOSTAT_LOCAL_REJECT)) &&
+ ((wcqe->parameter & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES))
phba->lpfc_rampdown_queue_depth(phba);
- }
+
/* Log the error status */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0373 FCP complete error: status=x%x, "
@@ -11534,10 +11599,11 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
}
/* Look up the FCP command IOCB and create pseudo response IOCB */
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ pring->stats.iocb_event++;
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0374 FCP complete with no corresponding "
@@ -11621,17 +11687,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
+ cq->CQ_wq++;
/* Process the WQ complete event */
phba->last_completion_time = jiffies;
- lpfc_sli4_fp_handle_fcp_wcqe(phba,
+ lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
(struct lpfc_wcqe_complete *)&wcqe);
break;
case CQE_CODE_RELEASE_WQE:
+ cq->CQ_release_wqe++;
/* Process the WQ release event */
lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
(struct lpfc_wcqe_release *)&wcqe);
break;
case CQE_CODE_XRI_ABORTED:
+ cq->CQ_xri_aborted++;
/* Process the WQ XRI abort event */
phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
@@ -11647,7 +11716,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}
/**
- * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
* @phba: Pointer to HBA context object.
* @eqe: Pointer to fast-path event queue entry.
*
@@ -11659,8 +11728,8 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* completion queue, and then return.
**/
static void
-lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
- uint32_t fcp_cqidx)
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+ uint32_t qidx)
{
struct lpfc_queue *cq;
struct lpfc_cqe *cqe;
@@ -11670,30 +11739,38 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0366 Not a valid fast-path completion "
+ "0366 Not a valid completion "
"event: majorcode=x%x, minorcode=x%x\n",
bf_get_le32(lpfc_eqe_major_code, eqe),
bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Check if this is a Slow path event */
+ if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
+ lpfc_sli4_sp_handle_eqe(phba, eqe,
+ phba->sli4_hba.hba_eq[qidx]);
+ return;
+ }
+
if (unlikely(!phba->sli4_hba.fcp_cq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3146 Fast-path completion queues "
"does not exist\n");
return;
}
- cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+ cq = phba->sli4_hba.fcp_cq[qidx];
if (unlikely(!cq)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0367 Fast-path completion queue "
- "(%d) does not exist\n", fcp_cqidx);
+ "(%d) does not exist\n", qidx);
return;
}
- /* Get the reference to the corresponding CQ */
- cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
if (unlikely(cqid != cq->queue_id)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0368 Miss-matched fast-path completion "
@@ -11709,6 +11786,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11737,86 +11818,7 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
}
/**
- * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
- * @irq: Interrupt number.
- * @dev_id: The device context pointer.
- *
- * This function is directly called from the PCI layer as an interrupt
- * service routine when device with SLI-4 interface spec is enabled with
- * MSI-X multi-message interrupt mode and there are slow-path events in
- * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
- * interrupt mode, this function is called as part of the device-level
- * interrupt handler. When the PCI slot is in error recovery or the HBA is
- * undergoing initialization, the interrupt handler will not process the
- * interrupt. The link attention and ELS ring attention events are handled
- * by the worker thread. The interrupt handler signals the worker thread
- * and returns for these events. This function is called without any lock
- * held. It gets the hbalock to access and update SLI data structures.
- *
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
- **/
-irqreturn_t
-lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
-{
- struct lpfc_hba *phba;
- struct lpfc_queue *speq;
- struct lpfc_eqe *eqe;
- unsigned long iflag;
- int ecount = 0;
-
- /*
- * Get the driver's phba structure from the dev_id
- */
- phba = (struct lpfc_hba *)dev_id;
-
- if (unlikely(!phba))
- return IRQ_NONE;
-
- /* Get to the EQ struct associated with this vector */
- speq = phba->sli4_hba.sp_eq;
- if (unlikely(!speq))
- return IRQ_NONE;
-
- /* Check device state for handling interrupt */
- if (unlikely(lpfc_intr_state_check(phba))) {
- /* Check again for link_state with lock held */
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (phba->link_state < LPFC_LINK_DOWN)
- /* Flush, clear interrupt, and rearm the EQ */
- lpfc_sli4_eq_flush(phba, speq);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- return IRQ_NONE;
- }
-
- /*
- * Process all the event on FCP slow-path EQ
- */
- while ((eqe = lpfc_sli4_eq_get(speq))) {
- lpfc_sli4_sp_handle_eqe(phba, eqe);
- if (!(++ecount % speq->entry_repost))
- lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
- }
-
- /* Always clear and re-arm the slow-path EQ */
- lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
-
- /* Catch the no cq entry condition */
- if (unlikely(ecount == 0)) {
- if (phba->intr_type == MSIX)
- /* MSI-X treated interrupt served as no EQ share INT */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0357 MSI-X interrupt with no EQE\n");
- else
- /* Non MSI-X treated on interrupt as EQ share INT */
- return IRQ_NONE;
- }
-
- return IRQ_HANDLED;
-} /* lpfc_sli4_sp_intr_handler */
-
-/**
- * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
@@ -11833,11 +11835,16 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
* the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
* equal to the FCP CQ index.
*
+ * The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
**/
irqreturn_t
-lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
@@ -11854,22 +11861,34 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
if (unlikely(!phba))
return IRQ_NONE;
- if (unlikely(!phba->sli4_hba.fp_eq))
+ if (unlikely(!phba->sli4_hba.hba_eq))
return IRQ_NONE;
/* Get to the EQ struct associated with this vector */
- fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+ fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
if (unlikely(!fpeq))
return IRQ_NONE;
+ if (lpfc_fcp_look_ahead) {
+ if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
+ lpfc_sli4_eq_clr_intr(fpeq);
+ else {
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ return IRQ_NONE;
+ }
+ }
+
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
+ fpeq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
lpfc_sli4_eq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (lpfc_fcp_look_ahead)
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
return IRQ_NONE;
}
@@ -11877,15 +11896,27 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
* Process all the events on the FCP fast-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+ lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
if (!(++ecount % fpeq->entry_repost))
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+ fpeq->EQ_processed++;
}
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > fpeq->EQ_max_eqe)
+ fpeq->EQ_max_eqe = ecount;
+
/* Always clear and re-arm the fast-path EQ */
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+
+ if (lpfc_fcp_look_ahead) {
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ return IRQ_NONE;
+ }
+
if (phba->intr_type == MSIX)
/* MSI-X interrupt arrived but no EQ entries were found */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11895,6 +11926,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+ if (lpfc_fcp_look_ahead)
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */
@@ -11919,8 +11952,8 @@ irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
- irqreturn_t sp_irq_rc, fp_irq_rc;
- bool fp_handled = false;
+ irqreturn_t hba_irq_rc;
+ bool hba_handled = false;
uint32_t fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
@@ -11930,21 +11963,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
/*
- * Invokes slow-path host attention interrupt handling as appropriate.
- */
- sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
-
- /*
* Invoke fast-path host attention interrupt handling as appropriate.
*/
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
- fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+ hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
- if (fp_irq_rc == IRQ_HANDLED)
- fp_handled |= true;
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
}
- return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
+ return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
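The lpfc_fcp_look_ahead path above gates each fast-path vector with an atomic counter so that only one context at a time services an EQ, and the interrupt can be dismissed while the EQ is claimed. A minimal sketch of that claim/release pattern, with a hypothetical eq_handle standing in for struct lpfc_fcp_eq_hdl:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct lpfc_fcp_eq_hdl */
struct eq_handle {
	atomic_t in_use;	/* initialized to 1: EQ is free */
};

/*
 * First caller wins: atomic_dec_and_test() is true only for the
 * 1 -> 0 transition; a racing caller sees a nonzero result, undoes
 * its decrement, and backs off (the handler returns IRQ_NONE).
 */
static bool eq_try_claim(struct eq_handle *h)
{
	if (atomic_dec_and_test(&h->in_use))
		return true;		/* we own the EQ now */
	atomic_inc(&h->in_use);		/* lost the race, restore */
	return false;
}

static void eq_release(struct eq_handle *h)
{
	atomic_inc(&h->in_use);		/* 0 -> 1: EQ free again */
}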
/**
@@ -12075,7 +12103,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
- if (startq >= phba->cfg_fcp_eq_count)
+ if (startq >= phba->cfg_fcp_io_channel)
return 0;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -12089,12 +12117,13 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
eq_delay = &mbox->u.mqe.un.eq_delay;
/* Calculate delay multiplier from maximum interrupts per second */
- dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;
+ dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
+ dmult = LPFC_DMULT_CONST/dmult - 1;
cnt = 0;
- for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count;
+ for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx++) {
- eq = phba->sli4_hba.fp_eq[fcp_eqidx];
+ eq = phba->sli4_hba.hba_eq[fcp_eqidx];
if (!eq)
continue;
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
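A worked sketch of the delay-multiplier arithmetic changed above: the configured interrupt ceiling is now split across the I/O channels before the multiplier is derived. The constant is passed in as a parameter here rather than asserted, since its value lives in the driver headers:

#include <linux/types.h>

/*
 * Example: cfg_fcp_imax = 10000 interrupts/sec spread over
 * cfg_fcp_io_channel = 4 EQs gives 2500 interrupts/sec per EQ,
 * from which the hardware delay multiplier is derived.
 */
static u16 eq_delay_multiplier(u32 cfg_fcp_imax, u32 cfg_fcp_io_channel,
			       u32 dmult_const)	/* LPFC_DMULT_CONST */
{
	u32 per_channel = cfg_fcp_imax / cfg_fcp_io_channel;

	return dmult_const / per_channel - 1;	/* larger = longer delay */
}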
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 2626f58c0747..2f48d000a3b4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -131,7 +131,9 @@ typedef struct lpfcMboxq {
#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per
ring */
-#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
+#define LPFC_SLI3_MAX_RING 4 /* Max num of SLI3 rings used by driver.
+ For SLI4, an additional ring for each
+ FCP WQ will be allocated. */
struct lpfc_sli_ring;
@@ -158,6 +160,24 @@ struct lpfc_sli_ring_stat {
uint64_t iocb_rsp_full; /* IOCB rsp ring full */
};
+struct lpfc_sli3_ring {
+ uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
+ uint32_t next_cmdidx; /* next_cmd index */
+ uint32_t rspidx; /* current index in response ring */
+ uint32_t cmdidx; /* current index in command ring */
+ uint16_t numCiocb; /* number of command iocb's per ring */
+ uint16_t numRiocb; /* number of rsp iocb's per ring */
+ uint16_t sizeCiocb; /* Size of command iocb's in this ring */
+ uint16_t sizeRiocb; /* Size of response iocb's in this ring */
+ uint32_t *cmdringaddr; /* virtual address for cmd rings */
+ uint32_t *rspringaddr; /* virtual address for rsp rings */
+};
+
+struct lpfc_sli4_ring {
+ struct lpfc_queue *wqp; /* Pointer to associated WQ */
+};
+
/* Structure used to hold SLI ring information */
struct lpfc_sli_ring {
uint16_t flag; /* ring flags */
@@ -166,16 +186,10 @@ struct lpfc_sli_ring {
#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
- uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
- uint32_t next_cmdidx; /* next_cmd index */
- uint32_t rspidx; /* current index in response ring */
- uint32_t cmdidx; /* current index in command ring */
uint8_t rsvd;
uint8_t ringno; /* ring number */
- uint16_t numCiocb; /* number of command iocb's per ring */
- uint16_t numRiocb; /* number of rsp iocb's per ring */
- uint16_t sizeCiocb; /* Size of command iocb's in this ring */
- uint16_t sizeRiocb; /* Size of response iocb's in this ring */
+
+ spinlock_t ring_lock; /* lock for issuing commands */
uint32_t fast_iotag; /* max fastlookup based iotag */
uint32_t iotag_ctr; /* keeps track of the next iotag to use */
@@ -186,8 +200,6 @@ struct lpfc_sli_ring {
struct list_head txcmplq;
uint16_t txcmplq_cnt; /* current length of queue */
uint16_t txcmplq_max; /* max length */
- uint32_t *cmdringaddr; /* virtual address for cmd rings */
- uint32_t *rspringaddr; /* virtual address for rsp rings */
uint32_t missbufcnt; /* keep track of buffers to post */
struct list_head postbufq;
uint16_t postbufq_cnt; /* current length of queue */
@@ -207,6 +219,10 @@ struct lpfc_sli_ring {
/* cmd ring available */
void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
struct lpfc_sli_ring *);
+ union {
+ struct lpfc_sli3_ring sli3;
+ struct lpfc_sli4_ring sli4;
+ } sli;
};
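A short sketch of how the new sli3/sli4 union is selected at run time; sli_rev is the driver's existing SLI revision field, and the member names follow the hunk above:

#include <linux/printk.h>

/* Per-revision ring state lives in exactly one union member */
static void ring_show_state(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		/* SLI-3: host-managed command/response indexes */
		pr_info("cmdidx=%u rspidx=%u\n",
			pring->sli.sli3.cmdidx, pring->sli.sli3.rspidx);
	else
		/* SLI-4: the ring fronts an associated work queue */
		pr_info("wq id=%u\n", pring->sli.sli4.wqp->queue_id);
}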
/* Structure used for configuring rings to a specific profile or rctl / type */
@@ -239,6 +255,8 @@ struct lpfc_sli_stat {
uint64_t mbox_stat_err; /* Mbox cmds completed status error */
uint64_t mbox_cmd; /* Mailbox commands issued */
uint64_t sli_intr; /* Count of Host Attention interrupts */
+ uint64_t sli_prev_intr; /* Previous cnt of Host Attention interrupts */
+ uint64_t sli_ips; /* Host Attention interrupts per sec */
uint32_t err_attn_event; /* Error Attn event counters */
uint32_t link_event; /* Link event counters */
uint32_t mbox_event; /* Mailbox event counters */
@@ -270,7 +288,7 @@ struct lpfc_sli {
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
- struct lpfc_sli_ring ring[LPFC_MAX_RING];
+ struct lpfc_sli_ring *ring;
int fcp_ring; /* ring used for FCP initiator commands */
int next_ring;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index ec756118c5c1..bd4bc4342ae2 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -34,18 +34,10 @@
/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT 254
-/* Multi-queue arrangement for fast-path FCP work queues */
-#define LPFC_FN_EQN_MAX 8
-#define LPFC_SP_EQN_DEF 1
-#define LPFC_FP_EQN_DEF 4
-#define LPFC_FP_EQN_MIN 1
-#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
-
-#define LPFC_FN_WQN_MAX 32
-#define LPFC_SP_WQN_DEF 1
-#define LPFC_FP_WQN_DEF 4
-#define LPFC_FP_WQN_MIN 1
-#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
+#define LPFC_FCP_IO_CHAN_DEF 4
+#define LPFC_FCP_IO_CHAN_MIN 1
+#define LPFC_FCP_IO_CHAN_MAX 8
/*
* Provide the default FCF Record attributes used by the driver
@@ -141,6 +133,37 @@ struct lpfc_queue {
uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
+
+ struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+
+ /* For q stats */
+ uint32_t q_cnt_1;
+ uint32_t q_cnt_2;
+ uint32_t q_cnt_3;
+ uint64_t q_cnt_4;
+/* defines for EQ stats */
+#define EQ_max_eqe q_cnt_1
+#define EQ_no_entry q_cnt_2
+#define EQ_badstate q_cnt_3
+#define EQ_processed q_cnt_4
+
+/* defines for CQ stats */
+#define CQ_mbox q_cnt_1
+#define CQ_max_cqe q_cnt_1
+#define CQ_release_wqe q_cnt_2
+#define CQ_xri_aborted q_cnt_3
+#define CQ_wq q_cnt_4
+
+/* defines for WQ stats */
+#define WQ_overflow q_cnt_1
+#define WQ_posted q_cnt_4
+
+/* defines for RQ stats */
+#define RQ_no_posted_buf q_cnt_1
+#define RQ_no_buf_found q_cnt_2
+#define RQ_buf_trunc q_cnt_3
+#define RQ_rcv_buf q_cnt_4
+
union sli4_qe qe[1]; /* array to index entries (must be last) */
};
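The q_cnt_* overlay above lets one set of four counters serve every queue type through type-specific aliases; a sketch of an EQ handler bumping them (same storage, different names):

/* EQ_processed, EQ_max_eqe and EQ_no_entry alias q_cnt_4/1/2 */
static void eq_account(struct lpfc_queue *eq, int ecount)
{
	eq->EQ_processed += ecount;	/* total EQEs handled */

	if (ecount > eq->EQ_max_eqe)
		eq->EQ_max_eqe = ecount;	/* high-water mark per intr */

	if (ecount == 0)
		eq->EQ_no_entry++;	/* interrupts that found no work */
}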
@@ -350,6 +373,7 @@ struct lpfc_hba;
struct lpfc_fcp_eq_hdl {
uint32_t idx;
struct lpfc_hba *phba;
+ atomic_t fcp_eq_in_use;
};
/* Port Capabilities for SLI4 Parameters */
@@ -407,6 +431,8 @@ struct lpfc_sli4_lnk_info {
uint8_t lnk_no;
};
+#define LPFC_SLI4_HANDLER_NAME_SZ 16
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -463,20 +489,23 @@ struct lpfc_sli4_hba {
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
+ uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
uint32_t cfg_eqn;
uint32_t msix_vec_nr;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+
/* Pointers to the constructed SLI4 queues */
- struct lpfc_queue **fp_eq; /* Fast-path event queue */
- struct lpfc_queue *sp_eq; /* Slow-path event queue */
+ struct lpfc_queue **hba_eq;/* Event queues for HBA */
+ struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+ uint16_t *fcp_cq_map;
+
+ struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
- struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
- struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
- struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
/* Setup information for various queue parameters */
int eq_esize;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4704e5b5088e..04265a1c4e52 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,11 +18,16 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.32"
+#define LPFC_DRIVER_VERSION "8.3.34"
#define LPFC_DRIVER_NAME "lpfc"
+
+/* Used for SLI 2/3 */
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
+/* Used for SLI4 */
+#define LPFC_DRIVER_HANDLER_NAME "lpfc:"
+
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved."
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e8f892647681..fcb005fa4bd1 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.06.15-rc1"
-#define MEGASAS_RELDATE "Mar. 19, 2012"
-#define MEGASAS_EXT_VERSION "Mon. Mar. 19 17:00:00 PDT 2012"
+#define MEGASAS_VERSION "00.00.06.18-rc1"
+#define MEGASAS_RELDATE "Jun. 17, 2012"
+#define MEGASAS_EXT_VERSION "Tue. Jun. 17 17:00:00 PDT 2012"
/*
* Device IDs
@@ -747,6 +747,7 @@ struct megasas_ctrl_info {
#define MEGASAS_RESET_NOTICE_INTERVAL 5
#define MEGASAS_IOCTL_CMD 0
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
+#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
/*
* FW reports the maximum of number of commands that it can accept (maximum
@@ -1364,6 +1365,7 @@ struct megasas_instance {
unsigned long bar;
long reset_flags;
struct mutex reset_mutex;
+ int throttlequeuedepth;
};
enum {
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ed38454228c6..0393ec478cdf 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.06.15-rc1
+ * Version : v00.00.06.18-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -71,6 +71,16 @@ static int msix_disable;
module_param(msix_disable, int, S_IRUGO);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
+static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+module_param(throttlequeuedepth, int, S_IRUGO);
+MODULE_PARM_DESC(throttlequeuedepth,
+ "Adapter queue depth when throttled due to I/O timeout. Default: 16");
+
+int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+module_param(resetwaittime, int, S_IRUGO);
+MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
+ "before resetting adapter. Default: 180");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -1595,8 +1605,9 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
{
unsigned long flags;
if (instance->flag & MEGASAS_FW_BUSY
- && time_after(jiffies, instance->last_time + 5 * HZ)
- && atomic_read(&instance->fw_outstanding) < 17) {
+ && time_after(jiffies, instance->last_time + 5 * HZ)
+ && atomic_read(&instance->fw_outstanding) <
+ instance->throttlequeuedepth + 1) {
spin_lock_irqsave(instance->host->host_lock, flags);
instance->flag &= ~MEGASAS_FW_BUSY;
@@ -1772,7 +1783,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
return SUCCESS;
}
- for (i = 0; i < wait_time; i++) {
+ for (i = 0; i < resetwaittime; i++) {
int outstanding = atomic_read(&instance->fw_outstanding);
@@ -1914,7 +1925,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
/* FW is busy, throttle IO */
spin_lock_irqsave(instance->host->host_lock, flags);
- instance->host->can_queue = 16;
+ instance->host->can_queue = instance->throttlequeuedepth;
instance->last_time = jiffies;
instance->flag |= MEGASAS_FW_BUSY;
@@ -3577,6 +3588,24 @@ static int megasas_init_fw(struct megasas_instance *instance)
kfree(ctrl_info);
+ /* Check for valid throttlequeuedepth module parameter */
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY ||
+ instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) {
+ if (throttlequeuedepth > (instance->max_fw_cmds -
+ MEGASAS_SKINNY_INT_CMDS))
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+ else
+ instance->throttlequeuedepth = throttlequeuedepth;
+ } else {
+ if (throttlequeuedepth > (instance->max_fw_cmds -
+ MEGASAS_INT_CMDS))
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+ else
+ instance->throttlequeuedepth = throttlequeuedepth;
+ }
+
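A condensed sketch of the validation above: the module parameter is honored only if it leaves room for the driver's reserved firmware commands, otherwise the default depth is restored:

/* Clamp a user-supplied throttle depth against firmware limits */
static int megasas_clamp_throttle_depth(int requested, int max_fw_cmds,
					int reserved_cmds)
{
	if (requested > max_fw_cmds - reserved_cmds)
		return MEGASAS_THROTTLE_QUEUE_DEPTH;	/* fall back */
	return requested;
}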
/*
* Setup tasklet for cmd completion
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a610cf1d4847..ddf094e7d0ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -94,6 +94,7 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);
extern u32 megasas_dbg_lvl;
+extern int resetwaittime;
/**
* megasas_enable_intr_fusion - Enables interrupts
@@ -461,8 +462,8 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
* Allocate the dynamic array first and then allocate individual
* commands.
*/
- fusion->cmd_list = kmalloc(sizeof(struct megasas_cmd_fusion *)
- *max_cmd, GFP_KERNEL);
+ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
+ * max_cmd, GFP_KERNEL);
if (!fusion->cmd_list) {
printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
@@ -470,9 +471,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
goto fail_cmd_list;
}
- memset(fusion->cmd_list, 0, sizeof(struct megasas_cmd_fusion *)
- *max_cmd);
-
max_cmd = instance->max_fw_cmds;
for (i = 0; i < max_cmd; i++) {
fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
@@ -2063,9 +2061,9 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
{
int i, outstanding, retval = 0;
- u32 fw_state, wait_time = MEGASAS_RESET_WAIT_TIME;
+ u32 fw_state;
- for (i = 0; i < wait_time; i++) {
+ for (i = 0; i < resetwaittime; i++) {
/* Check if firmware is in fault state */
fw_state = instance->instancet->read_fw_status_reg(
instance->reg_set) & MFI_STATE_MASK;
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index bbb7e4bf30a3..39f08dd20556 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
# Kernel configuration file for the MPT2SAS
#
# This code is based on drivers/scsi/mpt2sas/Kconfig
-# Copyright (C) 2007-2010 LSI Corporation
+# Copyright (C) 2007-2012 LSI Corporation
# (mailto:DL-MPTFusionLinux@lsi.com)
# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index a80f3220c641..e960f9625c78 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2011 LSI Corporation.
+ * Copyright (c) 2000-2012 LSI Corporation.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.23
+ * mpi2.h Version: 02.00.25
*
* Version History
* ---------------
@@ -72,6 +72,9 @@
* 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
* 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
* 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
* --------------------------------------------------------------------------
*/
@@ -97,7 +100,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x17)
+#define MPI2_HEADER_VERSION_UNIT (0x19)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -275,6 +278,11 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+/* Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
+
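A sketch of how a host driver might honor these read windows after asserting hard reset; the polling loop and the 0xFFFFFFFF "device gone" check are assumptions for illustration, not part of the MPI headers:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static u32 read_doorbell_after_reset(void __iomem *doorbell)
{
	unsigned long window;
	u32 val;

	/* mandated quiet time before the first post-reset read */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC / 1000);

	window = jiffies + usecs_to_jiffies(
		MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC);
	do {
		val = readl(doorbell);
		if (val != 0xFFFFFFFF)	/* device is responding again */
			return val;
		msleep(1);
	} while (time_before(jiffies, window));

	return val;
}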
/*****************************************************************************
*
* Message Descriptors
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index de90162413c2..38c5da398143 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2010 LSI Corporation.
+ * Copyright (c) 2000-2012 LSI Corporation.
*
*
* Name: mpi2_init.h
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.11
+ * mpi2_init.h Version: 02.00.13
*
* Version History
* ---------------
@@ -34,6 +34,8 @@
* 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
* 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
* 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
* --------------------------------------------------------------------------
*/
@@ -194,6 +196,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST
#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+/* alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 9a925c07a9ec..b0d4760bb17d 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2011 LSI Corporation.
+ * Copyright (c) 2000-2012 LSI Corporation.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.19
+ * mpi2_ioc.h Version: 02.00.21
*
* Version History
* ---------------
@@ -117,6 +117,7 @@
* 08-24-11 02.00.19 Added PhysicalPort field to
* MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
* Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
* --------------------------------------------------------------------------
*/
@@ -492,7 +493,8 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
#define MPI2_EVENT_HOST_MESSAGE (0x0028)
-
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
/* Log Entry Added Event data */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 0601612b875a..2b38af213beb 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2010 LSI Corporation.
+ * Copyright (c) 2000-2012 LSI Corporation.
*
*
* Name: mpi2_raid.h
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.06
+ * mpi2_raid.h Version: 02.00.08
*
* Version History
* ---------------
@@ -26,7 +26,7 @@
* 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
* related structures and defines.
* Added product-specific range to RAID Action values.
-
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
* --------------------------------------------------------------------------
*/
@@ -181,6 +181,7 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b25757d1e91b..db5960855cdf 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -1971,9 +1971,9 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
- case MPT2SAS_INTEL_RAMSDALE_SSDID:
+ case MPT2SAS_INTEL_SSD910_SSDID:
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RAMSDALE_BRANDING);
+ MPT2SAS_INTEL_SSD910_BRANDING);
break;
default:
break;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index b3a1a30055d6..543d8d637479 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "13.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 13
+#define MPT2SAS_DRIVER_VERSION "14.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 14
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -171,8 +171,8 @@
"Intel Integrated RAID Module RMS2LL040"
#define MPT2SAS_INTEL_RS25GB008_BRANDING \
"Intel(R) RAID Controller RS25GB008"
-#define MPT2SAS_INTEL_RAMSDALE_BRANDING \
- "Intel 720 Series SSD"
+#define MPT2SAS_INTEL_SSD910_BRANDING \
+ "Intel(R) SSD 910 Series"
/*
* Intel HBA SSDIDs
*/
@@ -183,7 +183,7 @@
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
-#define MPT2SAS_INTEL_RAMSDALE_SSDID 0x3700
+#define MPT2SAS_INTEL_SSD910_SSDID 0x3700
/*
* HP HBA branding
@@ -1096,6 +1096,8 @@ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz);
int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 2b4d37613d32..863778071a9d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -683,6 +683,42 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
}
/**
+ * mpt2sas_config_get_iounit_pg3 - obtain iounit page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE3_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
* mpt2sas_config_get_ioc_pg8 - obtain ioc page 8
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 49bdd2dc8452..08685c4cf231 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -2181,10 +2181,12 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
return -EAGAIN;
state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
- if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
- return -EAGAIN;
- else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+ if (state == NON_BLOCKING) {
+ if (!mutex_trylock(&ioc->ctl_cmds.mutex))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
return -ERESTARTSYS;
+ }
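The restructuring above removes an if/else chain that could acquire the mutex twice when a non-blocking trylock succeeded; the intended pattern in isolation, as a sketch with a generic command mutex:

#include <linux/fs.h>
#include <linux/mutex.h>

/* Exactly one acquisition path runs; O_NONBLOCK callers never sleep */
static int cmd_lock(struct file *file, struct mutex *lock)
{
	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(lock))
			return -EAGAIN;		/* would block: bail out */
	} else if (mutex_lock_interruptible(lock)) {
		return -ERESTARTSYS;		/* interrupted by a signal */
	}
	return 0;				/* lock held on success */
}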
switch (cmd) {
case MPT2IOCINFO:
@@ -2690,6 +2692,75 @@ _ctl_ioc_reply_queue_count_show(struct device *cdev,
static DEVICE_ATTR(reply_queue_count, S_IRUGO,
_ctl_ioc_reply_queue_count_show, NULL);
+/**
+ * _ctl_BRM_status_show - Backup Rail Monitor Status
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * This shows the Backup Rail Monitor (BRM) status for WarpDrive controllers.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 backup_rail_monitor_status = 0;
+ u16 ioc_status;
+ int sz;
+ ssize_t rc = 0;
+
+ if (!ioc->is_warpdrive) {
+ printk(MPT2SAS_ERR_FMT "%s: BRM attribute is only for"\
+ "warpdrive\n", ioc->name, __func__);
+ goto out;
+ }
+
+ /* allocate up to 36 GPIOVal entries */
+ sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
+ io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
+ if (!io_unit_pg3) {
+ printk(MPT2SAS_ERR_FMT "%s: failed allocating memory"\
+ "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+ goto out;
+ }
+
+ if (mpt2sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
+ 0) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed reading iounit_pg3\n", ioc->name,
+ __func__);
+ goto out;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "%s: iounit_pg3 failed with"\
+ "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+ goto out;
+ }
+
+ if (io_unit_pg3->GPIOCount < 25) {
+ printk(MPT2SAS_ERR_FMT "%s: iounit_pg3->GPIOCount less than"\
+ "25 entries, detected (%d) entries\n", ioc->name, __func__,
+ io_unit_pg3->GPIOCount);
+ goto out;
+ }
+
+ /* BRM status is in bit zero of GPIOVal[24] */
+ backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
+ rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
+
+ out:
+ kfree(io_unit_pg3);
+ return rc;
+}
+static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
+
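The attribute above reduces IO Unit Page 3 to a single bit; the extraction step on its own, as a sketch (bit 0 of the little-endian GPIOVal[24] word, per the comment in the hunk):

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static int brm_status_from_pg3(const __le16 *gpio_val, u8 gpio_count)
{
	if (gpio_count < 25)
		return -EINVAL;	/* page too short to carry BRM */

	return le16_to_cpu(gpio_val[24]) & 1;
}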
struct DIAG_BUFFER_START {
__le32 Size;
__le32 DiagVersion;
@@ -2901,6 +2972,7 @@ struct device_attribute *mpt2sas_host_attrs[] = {
&dev_attr_host_trace_buffer,
&dev_attr_host_trace_buffer_enable,
&dev_attr_reply_queue_count,
+ &dev_attr_BRM_status,
NULL,
};
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 11ff1d5fb8f0..b5eb0d1b8ea6 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 9731f8e661bf..69cc7d0c112c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index b1ebd6f8dab3..96f4d1f1cf5e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -119,6 +119,15 @@ module_param(diag_buffer_enable, int, 0);
MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
"(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = 0;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
/**
* struct sense_info - common structure for obtaining sense keys
* @skey: sense key
@@ -3768,8 +3777,6 @@ static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
{
u8 ascq;
- u8 sk;
- u8 host_byte;
switch (ioc_status) {
case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
@@ -3786,16 +3793,8 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
break;
}
- if (scmd->sc_data_direction == DMA_TO_DEVICE) {
- sk = ILLEGAL_REQUEST;
- host_byte = DID_ABORT;
- } else {
- sk = ABORTED_COMMAND;
- host_byte = DID_OK;
- }
-
- scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
- scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, ascq);
+ scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
SAM_STAT_CHECK_CONDITION;
}
@@ -5973,8 +5972,14 @@ _scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
#endif
if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
- !ioc->sas_hba.num_phys)
+ !ioc->sas_hba.num_phys) {
+ if (disable_discovery > 0 && ioc->shost_recovery) {
+ /* Wait for the reset to complete */
+ while (ioc->shost_recovery)
+ ssleep(1);
+ }
_scsih_sas_host_add(ioc);
+ }
}
/**
@@ -7254,7 +7259,8 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
- if (!ioc->is_driver_loading) {
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
@@ -7929,6 +7935,9 @@ _scsih_scan_start(struct Scsi_Host *shost)
if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
+ if (disable_discovery > 0)
+ return;
+
ioc->start_scan = 1;
rc = mpt2sas_port_enable(ioc);
@@ -7950,6 +7959,12 @@ _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ if (disable_discovery > 0) {
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ return 1;
+ }
+
if (time >= (300 * HZ)) {
ioc->base_cmds.status = MPT2_CMD_NOT_USED;
printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
@@ -8055,8 +8070,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (max_sectors != 0xFFFF) {
if (max_sectors < 64) {
shost->max_sectors = 64;
- printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
- "for max_sectors, range is 64 to 8192. Assigning "
+ printk(MPT2SAS_WARN_FMT "Invalid value %d passed "\
+ "for max_sectors, range is 64 to 32767. Assigning "\
"value of 64.\n", ioc->name, max_sectors);
} else if (max_sectors > 32767) {
shost->max_sectors = 32767;
@@ -8078,8 +8093,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_add_shost_fail;
}
- scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
- | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION);
+ /* register EEDP capabilities with SCSI layer */
+ if (prot_mask)
+ scsi_host_set_prot(shost, prot_mask);
+ else
+ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION);
+
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* event thread */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index c6cf20f60720..8c2ffbe6af0f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2010 LSI Corporation
+ * Copyright (C) 2007-2012 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 4539d59a0857..a3776d6ced60 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
mv_dprintk("mvs_abort_task() mvi=%p task=%p "
"slot=%p slot_idx=x%x\n",
mvi, task, slot, slot_idx);
- mvs_tmf_timedout((unsigned long)task);
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
mvs_slot_task_free(mvi, task, slot, slot_idx);
rc = TMF_RESP_FUNC_COMPLETE;
goto out;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5ab953029f8d..1c28215f8bed 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -26,7 +26,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- if (ha->fw_dump_reading == 0)
+ if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
return 0;
if (IS_QLA82XX(ha)) {
@@ -39,9 +39,14 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
rval = memory_read_from_buffer(buf, count,
&off, ha->md_dump, ha->md_dump_size);
return rval;
- } else
+ } else if (ha->mctp_dumped && ha->mctp_dump_reading)
+ return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
+ MCTP_DUMP_SIZE);
+ else if (ha->fw_dump_reading)
return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_len);
+ else
+ return 0;
}
static ssize_t
@@ -107,6 +112,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
if (IS_QLA82XX(ha))
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
+ case 6:
+ if (!ha->mctp_dump_reading)
+ break;
+ ql_log(ql_log_info, vha, 0x70c1,
+ "MCTP dump cleared on (%ld).\n", vha->host_no);
+ ha->mctp_dump_reading = 0;
+ ha->mctp_dumped = 0;
+ break;
+ case 7:
+ if (ha->mctp_dumped && !ha->mctp_dump_reading) {
+ ha->mctp_dump_reading = 1;
+ ql_log(ql_log_info, vha, 0x70c2,
+ "Raw mctp dump ready for read on (%ld).\n",
+ vha->host_no);
+ }
+ break;
}
return count;
}
@@ -564,6 +585,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int type;
+ uint32_t idc_control;
if (off != 0)
return -EINVAL;
@@ -587,22 +609,36 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- /* Make sure FC side is not in reset */
- qla2x00_wait_for_hba_online(vha);
-
- /* Issue MPI reset */
- scsi_block_requests(vha->host);
- if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0x7070,
- "MPI reset failed.\n");
- scsi_unblock_requests(vha->host);
- break;
+ if (IS_QLA83XX(ha)) {
+ qla83xx_idc_lock(vha, 0);
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
+ __qla83xx_set_idc_control(vha, idc_control);
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+ qla83xx_idc_unlock(vha, 0);
+ break;
+ } else {
+ /* Make sure FC side is not in reset */
+ qla2x00_wait_for_hba_online(vha);
+
+ /* Issue MPI reset */
+ scsi_block_requests(vha->host);
+ if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x7070,
+ "MPI reset failed.\n");
+ scsi_unblock_requests(vha->host);
+ break;
+ }
case 0x2025e:
if (!IS_QLA82XX(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
@@ -616,6 +652,29 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_fcoe_ctx_reset(vha);
break;
+ case 0x2025f:
+ if (!IS_QLA8031(ha))
+ return -EPERM;
+ ql_log(ql_log_info, vha, 0x70bc,
+ "Disabling Reset by IDC control\n");
+ qla83xx_idc_lock(vha, 0);
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control |= QLA83XX_IDC_RESET_DISABLED;
+ __qla83xx_set_idc_control(vha, idc_control);
+ qla83xx_idc_unlock(vha, 0);
+ break;
+ case 0x20260:
+ if (!IS_QLA8031(ha))
+ return -EPERM;
+ ql_log(ql_log_info, vha, 0x70bd,
+ "Enabling Reset by IDC control\n");
+ qla83xx_idc_lock(vha, 0);
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
+ __qla83xx_set_idc_control(vha, idc_control);
+ qla83xx_idc_unlock(vha, 0);
+ break;
}
return count;
}
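Cases 0x2025f and 0x20260 above are mirror images of one read-modify-write sequence on the shared IDC control word; a sketch of that sequence factored out, assuming only the qla83xx helpers already named in the hunk:

/* Set or clear one IDC control flag under the inter-driver lock */
static void qla83xx_update_idc_control(scsi_qla_host_t *vha, uint32_t flag,
				       bool enable)
{
	uint32_t idc_control;

	qla83xx_idc_lock(vha, 0);
	__qla83xx_get_idc_control(vha, &idc_control);
	if (enable)
		idc_control |= flag;	/* e.g. QLA83XX_IDC_RESET_DISABLED */
	else
		idc_control &= ~flag;
	__qla83xx_set_idc_control(vha, idc_control);
	qla83xx_idc_unlock(vha, 0);
}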
@@ -1251,6 +1310,49 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
state[1], state[2], state[3], state[4]);
}
+static ssize_t
+qla2x00_diag_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_BIDI_CAPABLE(vha->hw))
+ return snprintf(buf, PAGE_SIZE, "\n");
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
+}
+
+static ssize_t
+qla2x00_diag_megabytes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_BIDI_CAPABLE(vha->hw))
+ return snprintf(buf, PAGE_SIZE, "\n");
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ vha->bidi_stats.transfer_bytes >> 20);
+}
+
+static ssize_t
+qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t size;
+
+ if (!ha->fw_dumped)
+ size = 0;
+ else if (IS_QLA82XX(ha))
+ size = ha->md_template_size + ha->md_dump_size;
+ else
+ size = ha->fw_dump_len;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1289,6 +1391,9 @@ static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
+static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
+static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
+static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1318,6 +1423,9 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_fw_state,
&dev_attr_optrom_gold_fw_version,
&dev_attr_thermal_temp,
+ &dev_attr_diag_requests,
+ &dev_attr_diag_megabytes,
+ &dev_attr_fw_dump_size,
NULL,
};
@@ -1704,7 +1812,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
- int prot = 0;
+ int prot = 0, guard;
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -1717,7 +1825,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
| SHOST_DIX_TYPE1_PROTECTION
| SHOST_DIX_TYPE2_PROTECTION
| SHOST_DIX_TYPE3_PROTECTION);
- scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
+
+ guard = SHOST_DIX_GUARD_CRC;
+
+ if (IS_PI_IPGUARD_CAPABLE(ha) &&
+ (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+ guard |= SHOST_DIX_GUARD_IP;
+
+ scsi_host_set_guard(vha->host, guard);
} else
vha->flags.difdix_supported = 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c68883806c54..2f9bddd3c616 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -530,13 +530,13 @@ done_unmap_sg:
done:
return rval;
}
-
-/* Set the port configuration to enable the
- * internal loopback on ISP81XX
+/*
+ * Set the port configuration to enable the internal or external loopback
+ * depending on the loopback mode.
*/
static inline int
-qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
- uint16_t *new_config)
+qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ uint16_t *new_config, uint16_t mode)
{
int ret = 0;
int rval = 0;
@@ -545,8 +545,14 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_set_internal;
- new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
- memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
+ if (mode == INTERNAL_LOOPBACK)
+ new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
+ else if (mode == EXTERNAL_LOOPBACK)
+ new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
+ ql_dbg(ql_dbg_user, vha, 0x70be,
+ "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
+
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
ha->notify_dcbx_comp = 1;
ret = qla81xx_set_port_config(vha, new_config);
@@ -562,9 +568,17 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
ql_dbg(ql_dbg_user, vha, 0x7022,
"State change notification not received.\n");
- } else
- ql_dbg(ql_dbg_user, vha, 0x7023,
- "State change received.\n");
+ rval = -EINVAL;
+ } else {
+ if (ha->flags.idc_compl_status) {
+ ql_dbg(ql_dbg_user, vha, 0x70c3,
+ "Bad status in IDC Completion AEN\n");
+ rval = -EINVAL;
+ ha->flags.idc_compl_status = 0;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7023,
+ "State change received.\n");
+ }
ha->notify_dcbx_comp = 0;
@@ -572,11 +586,9 @@ done_set_internal:
return rval;
}
-/* Set the port configuration to disable the
- * internal loopback on ISP81XX
- */
+/* Disable loopback mode */
static inline int
-qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
int wait)
{
int ret = 0;
@@ -589,8 +601,12 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
memset(new_config, 0 , sizeof(new_config));
if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_INTERNAL_LOOPBACK) {
+ ENABLE_INTERNAL_LOOPBACK ||
+ (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_EXTERNAL_LOOPBACK) {
new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+ ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+ (new_config[0] & INTERNAL_LOOPBACK_MASK));
memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
ha->notify_dcbx_comp = wait;
@@ -707,7 +723,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
- if ((ha->current_topology == ISP_CFG_F ||
+ if (atomic_read(&vha->loop_state) == LOOP_READY &&
+ (ha->current_topology == ISP_CFG_F ||
((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
@@ -729,30 +746,24 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
goto done_free_dma_req;
}
- if (elreq.options != EXTERNAL_LOOPBACK) {
- ql_dbg(ql_dbg_user, vha, 0x7020,
- "Internal: current port config = %x\n",
- config[0]);
- if (qla81xx_set_internal_loopback(vha, config,
- new_config)) {
- ql_log(ql_log_warn, vha, 0x7024,
- "Internal loopback failed.\n");
- bsg_job->reply->result =
- (DID_ERROR << 16);
- rval = -EPERM;
- goto done_free_dma_req;
- }
- } else {
- /* For external loopback to work
- * ensure internal loopback is disabled
- */
- if (qla81xx_reset_internal_loopback(vha,
- config, 1)) {
- bsg_job->reply->result =
- (DID_ERROR << 16);
- rval = -EPERM;
- goto done_free_dma_req;
- }
+ ql_dbg(ql_dbg_user, vha, 0x70c0,
+ "elreq.options=%04x\n", elreq.options);
+
+ if (elreq.options == EXTERNAL_LOOPBACK) {
+ if (IS_QLA8031(ha))
+ rval = qla81xx_set_loopback_mode(vha,
+ config, new_config, elreq.options);
+ else
+ rval = qla81xx_reset_loopback_mode(vha,
+ config, 1);
+ } else {
+ rval = qla81xx_set_loopback_mode(vha, config,
+ new_config, elreq.options);
+ }
+
+ if (rval) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ rval = -EPERM;
+ goto done_free_dma_req;
}
type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -766,7 +777,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
/* Revert back to original port config
* Also clear internal loopback
*/
- qla81xx_reset_internal_loopback(vha,
+ qla81xx_reset_loopback_mode(vha,
new_config, 0);
}
@@ -1364,7 +1375,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- if (ha->flags.isp82xx_reset_hdlr_active)
+ if (ha->flags.nic_core_reset_hdlr_active)
return -EBUSY;
rval = qla2x00_optrom_setup(bsg_job, vha, 0);
@@ -1560,6 +1571,276 @@ done:
}
static int
+qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_i2c_access *i2c = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+ memcpy(sfp, i2c->buffer, i2c->length);
+ rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+ i2c->device, i2c->offset, i2c->length, i2c->option);
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_i2c_access *i2c = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
+ i2c->device, i2c->offset, i2c->length, i2c->option);
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ memcpy(i2c->buffer, sfp, i2c->length);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t thread_id;
+ uint32_t rval = EXT_STATUS_OK;
+ uint16_t req_sg_cnt = 0;
+ uint16_t rsp_sg_cnt = 0;
+ uint16_t nextlid = 0;
+ uint32_t tot_dsds;
+ srb_t *sp = NULL;
+ uint32_t req_data_len = 0;
+ uint32_t rsp_data_len = 0;
+
+ /* Check the type of the adapter */
+ if (!IS_BIDI_CAPABLE(ha)) {
+ ql_log(ql_log_warn, vha, 0x70a0,
+ "This adapter is not supported\n");
+ rval = EXT_STATUS_NOT_SUPPORTED;
+ goto done;
+ }
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ rval = EXT_STATUS_BUSY;
+ goto done;
+ }
+
+ /* Check if host is online */
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x70a1,
+ "Host is not online\n");
+ rval = EXT_STATUS_DEVICE_OFFLINE;
+ goto done;
+ }
+
+ /* Check if cable is plugged in or not */
+ if (vha->device_flags & DFLG_NO_CABLE) {
+ ql_log(ql_log_warn, vha, 0x70a2,
+ "Cable is unplugged...\n");
+ rval = EXT_STATUS_INVALID_CFG;
+ goto done;
+ }
+
+ /* Check if the switch is connected or not */
+ if (ha->current_topology != ISP_CFG_F) {
+ ql_log(ql_log_warn, vha, 0x70a3,
+ "Host is not connected to the switch\n");
+ rval = EXT_STATUS_INVALID_CFG;
+ goto done;
+ }
+
+ /* Check if operating mode is P2P */
+ if (ha->operating_mode != P2P) {
+ ql_log(ql_log_warn, vha, 0x70a4,
+ "Host is operating mode is not P2p\n");
+ rval = EXT_STATUS_INVALID_CFG;
+ goto done;
+ }
+
+ thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ mutex_lock(&ha->selflogin_lock);
+ if (vha->self_login_loop_id == 0) {
+ /* Initialize all required fields of fcport */
+ vha->bidir_fcport.vha = vha;
+ vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
+ vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
+ vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
+ vha->bidir_fcport.loop_id = vha->loop_id;
+
+ if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
+ ql_log(ql_log_warn, vha, 0x70a7,
+ "Failed to login port %06X for bidirectional IOCB\n",
+ vha->bidir_fcport.d_id.b24);
+ mutex_unlock(&ha->selflogin_lock);
+ rval = EXT_STATUS_MAILBOX;
+ goto done;
+ }
+ vha->self_login_loop_id = nextlid - 1;
+
+ }
+ mutex_unlock(&ha->selflogin_lock);
+
+ /* Assign the self login loop id to fcport */
+ vha->bidir_fcport.loop_id = vha->self_login_loop_id;
+
+ req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt,
+ DMA_TO_DEVICE);
+
+ if (!req_sg_cnt) {
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+ DMA_FROM_DEVICE);
+
+ if (!rsp_sg_cnt) {
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done_unmap_req_sg;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ ql_dbg(ql_dbg_user, vha, 0x70a9,
+ "Dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
+ "%x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done_unmap_sg;
+ }
+
+ req_data_len = bsg_job->request_payload.payload_len;
+ rsp_data_len = bsg_job->reply_payload.payload_len;
+
+ if (req_data_len != rsp_data_len) {
+ rval = EXT_STATUS_BUSY;
+ ql_log(ql_log_warn, vha, 0x70aa,
+ "req_data_len != rsp_data_len\n");
+ goto done_unmap_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_user, vha, 0x70ac,
+ "Alloc SRB structure failed\n");
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done_unmap_sg;
+ }
+
+ /* Populate srb->ctx with bidir ctx */
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->type = SRB_BIDI_CMD;
+ sp->done = qla2x00_bsg_job_done;
+
+ /* Add the read and write sg count */
+ tot_dsds = rsp_sg_cnt + req_sg_cnt;
+
+ rval = qla2x00_start_bidir(sp, vha, tot_dsds);
+ if (rval != EXT_STATUS_OK)
+ goto done_free_srb;
+ /* the bsg request will be completed in the interrupt handler */
+ return rval;
+
+done_free_srb:
+ mempool_free(sp, ha->srb_mempool);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+done:
+
+ /* Return an error vendor-specific response
+ * and complete the bsg request
+ */
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_OK) << 16;
+ bsg_job->job_done(bsg_job);
+ /* Always return success; the vendor rsp carries the real status */
+ return 0;
+}
+
+static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1596,6 +1877,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_WRITE_FRU_STATUS:
return qla2x00_write_fru_status(bsg_job);
+ case QL_VND_WRITE_I2C:
+ return qla2x00_write_i2c(bsg_job);
+
+ case QL_VND_READ_I2C:
+ return qla2x00_read_i2c(bsg_job);
+
+ case QL_VND_DIAG_IO_CMD:
+ return qla24xx_process_bidir_cmd(bsg_job);
+
default:
bsg_job->reply->result = (DID_ERROR << 16);
bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 70caa63a8930..37b8b7ba7421 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -19,21 +19,41 @@
#define QL_VND_SET_FRU_VERSION 0x0B
#define QL_VND_READ_FRU_STATUS 0x0C
#define QL_VND_WRITE_FRU_STATUS 0x0D
+#define QL_VND_DIAG_IO_CMD 0x0A
+#define QL_VND_WRITE_I2C 0x10
+#define QL_VND_READ_I2C 0x11
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
#define EXT_STATUS_ERR 1
+#define EXT_STATUS_BUSY 2
#define EXT_STATUS_INVALID_PARAM 6
+#define EXT_STATUS_DATA_OVERRUN 7
+#define EXT_STATUS_DATA_UNDERRUN 8
#define EXT_STATUS_MAILBOX 11
#define EXT_STATUS_NO_MEMORY 17
+#define EXT_STATUS_DEVICE_OFFLINE 22
+
+/*
+ * BSG vendor-specific status returns
+ * added for bidirectional IOCB support
+ */
+#define EXT_STATUS_NOT_SUPPORTED 27
+#define EXT_STATUS_INVALID_CFG 28
+#define EXT_STATUS_DMA_ERR 29
+#define EXT_STATUS_TIMEOUT 30
+#define EXT_STATUS_THREAD_FAILED 31
+#define EXT_STATUS_DATA_CMP_FAILED 32
/* BSG definitions for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
#define INT_DEF_LB_ECHO_CMD 1
/* Loopback related definitions */
+#define INTERNAL_LOOPBACK 0xF1
#define EXTERNAL_LOOPBACK 0xF2
#define ENABLE_INTERNAL_LOOPBACK 0x02
+#define ENABLE_EXTERNAL_LOOPBACK 0x04
#define INTERNAL_LOOPBACK_MASK 0x000E
#define MAX_ELS_FRAME_PAYLOAD 252
#define ELS_OPCODE_BYTE 0x10
@@ -183,4 +203,12 @@ struct qla_status_reg {
uint8_t reserved[7];
} __packed;
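+
+/* Vendor I2C read/write payload exchanged through the BSG interface;
+ * "length" bytes of "buffer" are valid (at most 0x40).
+ */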
+struct qla_i2c_access {
+ uint16_t device;
+ uint16_t offset;
+ uint16_t option;
+ uint16_t length;
+ uint8_t buffer[0x40];
+} __packed;
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index fdee5611f3e2..44efe3cc79e6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -11,26 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x1140 | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0124 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x114f | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
- * | Device Discovery | 0x2086 | 0x2020-0x2022 |
- * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
+ * | Device Discovery | 0x2087 | 0x2020-0x2022, |
+ * | | | 0x2016 |
+ * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
+ * | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
- * | DPC Thread | 0x401c | 0x4002,0x4013 |
- * | Async Events | 0x505f | 0x502b-0x502f |
+ * | DPC Thread | 0x401d | 0x4002,0x4013 |
+ * | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x709f | 0x7018,0x702e, |
+ * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
- * | | | 0x708c |
+ * | | | 0x708c, |
+ * | | | 0x70a5,0x70a6, |
+ * | | | 0x70a8,0x70ab, |
+ * | | | 0x70ad-0x70ae |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb054 | 0xb024 |
+ * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
* | Target Mode | 0xe06f | |
@@ -2357,7 +2362,7 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
/*
* This function is for formatting and logging debug information.
- * It is to be used when vha is not available and pci is availble,
+ * It is to be used when vha is not available and pci is available,
* i.e., before host allocation. It formats the message and logs it
* to the messages file.
* parameters:
@@ -2452,7 +2457,7 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
/*
* This function is for formatting and logging log messages.
- * It is to be used when vha is not available and pci is availble,
+ * It is to be used when vha is not available and pci is available,
* i.e., before host allocation. It formats the message and logs
* it to the messages file. All the messages are logged irrespective
* of the value of ql2xextended_error_logging.
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f278df8cce0f..8f911c0b1e74 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 39007f53aec0..a9725bf5527b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -115,6 +115,82 @@
#define WRT_REG_DWORD(addr, data) writel(data,addr)
/*
+ * ISP83XX specific remote register addresses
+ */
+#define QLA83XX_LED_PORT0 0x00201320
+#define QLA83XX_LED_PORT1 0x00201328
+#define QLA83XX_IDC_DEV_STATE 0x22102384
+#define QLA83XX_IDC_MAJOR_VERSION 0x22102380
+#define QLA83XX_IDC_MINOR_VERSION 0x22102398
+#define QLA83XX_IDC_DRV_PRESENCE 0x22102388
+#define QLA83XX_IDC_DRIVER_ACK 0x2210238c
+#define QLA83XX_IDC_CONTROL 0x22102390
+#define QLA83XX_IDC_AUDIT 0x22102394
+#define QLA83XX_IDC_LOCK_RECOVERY 0x2210239c
+#define QLA83XX_DRIVER_LOCKID 0x22102104
+#define QLA83XX_DRIVER_LOCK 0x8111c028
+#define QLA83XX_DRIVER_UNLOCK 0x8111c02c
+#define QLA83XX_FLASH_LOCKID 0x22102100
+#define QLA83XX_FLASH_LOCK 0x8111c010
+#define QLA83XX_FLASH_UNLOCK 0x8111c014
+#define QLA83XX_DEV_PARTINFO1 0x221023e0
+#define QLA83XX_DEV_PARTINFO2 0x221023e4
+#define QLA83XX_FW_HEARTBEAT 0x221020b0
+#define QLA83XX_PEG_HALT_STATUS1 0x221020a8
+#define QLA83XX_PEG_HALT_STATUS2 0x221020ac
+
+/* 83XX: Macros defining 8200 AEN Reason codes */
+#define IDC_DEVICE_STATE_CHANGE BIT_0
+#define IDC_PEG_HALT_STATUS_CHANGE BIT_1
+#define IDC_NIC_FW_REPORTED_FAILURE BIT_2
+#define IDC_HEARTBEAT_FAILURE BIT_3
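+/* These reason bits arrive in mb[1] of the 8200 AEN and are decoded
+ * in qla83xx_handle_8200_aen().
+ */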
+
+/* 83XX: Macros defining 8200 AEN Error-levels */
+#define ERR_LEVEL_NON_FATAL 0x1
+#define ERR_LEVEL_RECOVERABLE_FATAL 0x2
+#define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4
+
+/* 83XX: Macros for IDC Version */
+#define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01
+#define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0
+
+/* 83XX: Macros for scheduling dpc tasks */
+#define QLA83XX_NIC_CORE_RESET 0x1
+#define QLA83XX_IDC_STATE_HANDLER 0x2
+#define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3
+
+/* 83XX: Macros for defining IDC-Control bits */
+#define QLA83XX_IDC_RESET_DISABLED BIT_0
+#define QLA83XX_IDC_GRACEFUL_RESET BIT_1
+
+/* 83XX: Macros for different timeouts */
+#define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30
+#define QLA83XX_IDC_RESET_ACK_TIMEOUT 10
+#define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ)
+
+/* 83XX: Macros for defining class in DEV-Partition Info register */
+#define QLA83XX_CLASS_TYPE_NONE 0x0
+#define QLA83XX_CLASS_TYPE_NIC 0x1
+#define QLA83XX_CLASS_TYPE_FCOE 0x2
+#define QLA83XX_CLASS_TYPE_ISCSI 0x3
+
+/* 83XX: Macros for IDC Lock-Recovery stages */
+#define IDC_LOCK_RECOVERY_STAGE1 0x1 /* Stage1: Intent for
+ * lock-recovery
+ */
+#define IDC_LOCK_RECOVERY_STAGE2 0x2 /* Stage2: Perform lock-recovery */
+
+/* 83XX: Macros for IDC Audit type */
+#define IDC_AUDIT_TIMESTAMP 0x0 /* IDC-AUDIT: Record timestamp of
+ * dev-state change to NEED-RESET
+ * or NEED-QUIESCENT
+ */
+#define IDC_AUDIT_COMPLETION 0x1 /* IDC-AUDIT: Record duration of
+ * reset-recovery completion in
+ * seconds
+ */
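+
+/* IDC-AUDIT register layout, as encoded by qla83xx_idc_audit():
+ * bits[6:0] = function number, bit[7] = audit type,
+ * bits[31:8] = timestamp or duration in seconds.
+ */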
+
+/*
* The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in a
* 133MHz slot.
*/
@@ -129,6 +205,7 @@
#define MAX_FIBRE_DEVICES_2400 2048
#define MAX_FIBRE_DEVICES_LOOP 128
#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400
+#define LOOPID_MAP_SIZE (ha->max_fibre_devices)
#define MAX_FIBRE_LUNS 0xFFFF
#define MAX_HOST_COUNT 16
@@ -259,6 +336,7 @@ struct srb_iocb {
#define SRB_ADISC_CMD 6
#define SRB_TM_CMD 7
#define SRB_SCSI_CMD 8
+#define SRB_BIDI_CMD 9
typedef struct srb {
atomic_t ref_count;
@@ -594,6 +672,20 @@ typedef struct {
#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
+/* 83XX FCoE specific */
+#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
+
+/* Interrupt type codes */
+#define INTR_ROM_MB_SUCCESS 0x1
+#define INTR_ROM_MB_FAILED 0x2
+#define INTR_MB_SUCCESS 0x10
+#define INTR_MB_FAILED 0x11
+#define INTR_ASYNC_EVENT 0x12
+#define INTR_RSP_QUE_UPDATE 0x13
+#define INTR_RSP_QUE_UPDATE_83XX 0x14
+#define INTR_ATIO_QUE_UPDATE 0x1C
+#define INTR_ATIO_RSP_QUE_UPDATE 0x1D
+
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
/*
@@ -718,6 +810,7 @@ typedef struct {
#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
+#define MBC_PORT_RESET 0x120 /* Port Reset */
#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */
@@ -1375,9 +1468,10 @@ typedef struct {
} cont_a64_entry_t;
#define PO_MODE_DIF_INSERT 0
-#define PO_MODE_DIF_REMOVE BIT_0
-#define PO_MODE_DIF_PASS BIT_1
-#define PO_MODE_DIF_REPLACE (BIT_0 + BIT_1)
+#define PO_MODE_DIF_REMOVE 1
+#define PO_MODE_DIF_PASS 2
+#define PO_MODE_DIF_REPLACE 3
+#define PO_MODE_DIF_TCP_CKSUM 6
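+/* The DIF mode occupies the low bits of fw_prot_opts; TCP_CKSUM is
+ * selected when the DIX guard type is an IP checksum (see
+ * qla24xx_configure_prot_mode()).
+ */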
#define PO_ENABLE_DIF_BUNDLING BIT_8
#define PO_ENABLE_INCR_GUARD_SEED BIT_3
#define PO_DISABLE_INCR_REF_TAG BIT_5
@@ -1509,6 +1603,13 @@ typedef struct {
#define CS_RETRY 0x82 /* Driver defined */
#define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */
+#define CS_BIDIR_RD_OVERRUN 0x700
+#define CS_BIDIR_RD_WR_OVERRUN 0x707
+#define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN 0x715
+#define CS_BIDIR_RD_UNDERRUN 0x1500
+#define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN 0x1507
+#define CS_BIDIR_RD_WR_UNDERRUN 0x1515
+#define CS_BIDIR_DMA 0x200
/*
* Status entry status flags
*/
@@ -2373,6 +2474,11 @@ struct qla_statistics {
uint64_t output_bytes;
};
+struct bidi_statistics {
+ unsigned long long io_count;
+ unsigned long long transfer_bytes;
+};
+
/* Multi queue support */
#define MBC_INITIALIZE_MULTIQ 0x1f
#define QLA_QUE_PAGE 0X1000
@@ -2509,14 +2615,16 @@ struct qla_hw_data {
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
uint32_t isp82xx_fw_hung:1;
+ uint32_t nic_core_hung:1;
uint32_t quiesce_owner:1;
uint32_t thermal_supported:1;
- uint32_t isp82xx_reset_hdlr_active:1;
- uint32_t isp82xx_reset_owner:1;
+ uint32_t nic_core_reset_hdlr_active:1;
+ uint32_t nic_core_reset_owner:1;
uint32_t isp82xx_no_md_cap:1;
uint32_t host_shutting_down:1;
- /* 30 bits */
+ uint32_t idc_compl_status:1;
+ /* 32 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2670,6 +2778,16 @@ struct qla_hw_data {
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
+#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
+/* Bit 21 of fw_attributes decides the MCTP capabilities */
+#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
+ ((ha)->fw_attributes_ext[0] & BIT_0))
+#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
+ (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
/* HBA serial number */
uint8_t serial0;
@@ -2753,6 +2871,7 @@ struct qla_hw_data {
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
int notify_dcbx_comp;
+ struct mutex selflogin_lock;
/* Basic firmware related information. */
uint16_t fw_major_version;
@@ -2784,7 +2903,12 @@ struct qla_hw_data {
int fw_dump_reading;
dma_addr_t eft_dma;
void *eft;
-
+/* Current size of mctp dump is 0x086064 bytes */
+#define MCTP_DUMP_SIZE 0x086064
+ dma_addr_t mctp_dump_dma;
+ void *mctp_dump;
+ int mctp_dumped;
+ int mctp_dump_reading;
uint32_t chain_offset;
struct dentry *dfs_dir;
struct dentry *dfs_fce;
@@ -2896,8 +3020,8 @@ struct qla_hw_data {
unsigned long mn_win_crb;
unsigned long ms_win_crb;
int qdr_sn_window;
- uint32_t nx_dev_init_timeout;
- uint32_t nx_reset_timeout;
+ uint32_t fcoe_dev_init_timeout;
+ uint32_t fcoe_reset_timeout;
rwlock_t hw_lock;
uint16_t portnum; /* port number */
int link_width;
@@ -2918,6 +3042,20 @@ struct qla_hw_data {
void *md_dump;
uint32_t md_dump_size;
+ void *loop_id_map;
+
+ /* QLA83XX IDC specific fields */
+ uint32_t idc_audit_ts;
+
+ /* DPC low-priority workqueue */
+ struct workqueue_struct *dpc_lp_wq;
+ struct work_struct idc_aen;
+ /* DPC high-priority workqueue */
+ struct workqueue_struct *dpc_hp_wq;
+ struct work_struct nic_core_reset;
+ struct work_struct idc_state_handler;
+ struct work_struct nic_core_unrecoverable;
+
struct qlt_hw_data tgt;
};
@@ -2985,6 +3123,13 @@ typedef struct scsi_qla_host {
/* ISP configuration data. */
uint16_t loop_id; /* Host adapter loop id */
+ uint16_t self_login_loop_id; /* host adapter loop id
+ * obtained on self login
+ */
+ fc_port_t bidir_fcport; /* fcport used for bidir cmds;
+ * saves allocating one for
+ * each command
+ */
port_id_t d_id; /* Host adapter port id */
uint8_t marker_needed;
@@ -3038,6 +3183,7 @@ typedef struct scsi_qla_host {
int seconds_since_last_heartbeat;
struct fc_host_statistics fc_host_stat;
struct qla_statistics qla_stats;
+ struct bidi_statistics bidi_stats;
atomic_t vref_count;
} scsi_qla_host_t;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 499c74e39ee5..706c4f7bc7c9 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 6d7d7758c797..59524aa0ab32 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -381,6 +381,44 @@ struct init_cb_24xx {
/*
* ISP queue - command entry structure definition.
*/
+#define COMMAND_BIDIRECTIONAL 0x75
+struct cmd_bidir {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined */
+ uint8_t entry_status; /* Entry status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t wr_dseg_count; /* Write Data segment count. */
+ uint16_t rd_dseg_count; /* Read Data segment count. */
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint16_t control_flags; /* Control flags. */
+#define BD_WRAP_BACK BIT_3
+#define BD_READ_DATA BIT_1
+#define BD_WRITE_DATA BIT_0
+
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
+
+ uint16_t reserved[2]; /* Reserved */
+
+ uint32_t rd_byte_count; /* Total byte count read. */
+ uint32_t wr_byte_count; /* Total byte count written. */
+
+ uint8_t port_id[3]; /* PortID of destination port.*/
+ uint8_t vp_index;
+
+ uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
+ uint16_t fcp_data_dseg_len; /* Data segment length. */
+};
+
#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */
struct cmd_type_6 {
uint8_t entry_type; /* Entry type. */
@@ -1130,7 +1168,7 @@ struct mid_db_entry_24xx {
/*
* Virtual Port Control IOCB
*/
-#define VP_CTRL_IOCB_TYPE 0x30 /* Vitual Port Control entry. */
+#define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */
struct vp_ctrl_entry_24xx {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
@@ -1166,7 +1204,7 @@ struct vp_ctrl_entry_24xx {
/*
* Modify Virtual Port Configuration IOCB
*/
-#define VP_CONFIG_IOCB_TYPE 0x31 /* Vitual Port Config entry. */
+#define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */
struct vp_config_entry_24xx {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
@@ -1502,7 +1540,10 @@ struct access_chip_rsp_84xx {
/*
* ISP83xx mailbox commands
*/
-#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
+#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
+#define MBC_READ_REMOTE_REG 0x0009 /* Read remote register */
+#define MBC_RESTART_NIC_FIRMWARE 0x003d /* Restart NIC firmware */
+#define MBC_SET_ACCESS_CONTROL 0x003e /* Access control command */
/* Flash access control option field bit definitions */
#define FAC_OPT_FORCE_SEMAPHORE BIT_15
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9eacd2df111b..6acb39785a46 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -48,7 +48,7 @@ extern void qla2x00_update_fcports(scsi_qla_host_t *);
extern int qla2x00_abort_isp(scsi_qla_host_t *);
extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
-extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *);
+extern void qla2x00_quiesce_io(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
@@ -76,6 +76,14 @@ extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
extern fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
+
+extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t);
+extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *);
+extern void qla83xx_idc_audit(scsi_qla_host_t *, int);
+extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
+extern void qla83xx_reset_ownership(scsi_qla_host_t *);
+extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -133,6 +141,20 @@ extern void qla2x00_relogin(struct scsi_qla_host *);
extern void qla2x00_do_work(struct scsi_qla_host *);
extern void qla2x00_free_fcports(struct scsi_qla_host *);
+extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
+extern void qla83xx_service_idc_aen(struct work_struct *);
+extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *);
+extern void qla83xx_idc_state_handler_work(struct work_struct *);
+extern void qla83xx_nic_core_reset_work(struct work_struct *);
+
+extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t);
+extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t);
+extern int qla83xx_idc_state_handler(scsi_qla_host_t *);
+extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+
/*
* Global Functions in qla_mid.c source file.
*/
@@ -188,6 +210,8 @@ extern int qla2x00_start_sp(srb_t *);
extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
+extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
@@ -376,6 +400,9 @@ qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
extern int
qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
+extern int
+qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -419,7 +446,11 @@ extern void qla24xx_beacon_blink(struct scsi_qla_host *);
extern void qla83xx_beacon_blink(struct scsi_qla_host *);
extern int qla82xx_beacon_on(struct scsi_qla_host *);
extern int qla82xx_beacon_off(struct scsi_qla_host *);
-extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t);
+extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
+extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
+extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
+ uint32_t, uint16_t *);
extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t);
@@ -527,7 +558,6 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* PCI related functions */
extern int qla82xx_pci_config(struct scsi_qla_host *);
extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
-extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
extern int qla82xx_pci_region_offset(struct pci_dev *, int);
extern int qla82xx_iospace_config(struct qla_hw_data *);
@@ -580,6 +610,7 @@ extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
extern int qla82xx_idc_lock(struct qla_hw_data *);
extern void qla82xx_idc_unlock(struct qla_hw_data *);
extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 05260d25fe46..f4e4bd7c3f4d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1131,7 +1131,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
return ret;
rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
- 0xfa, mb, BIT_1|BIT_0);
+ 0xfa, mb, BIT_1);
if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
if (rval == QLA_MEMORY_ALLOC_FAILED)
ql_dbg(ql_dbg_disc, vha, 0x2085,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a44653b42161..799a58bb9859 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -77,7 +77,7 @@ qla2x00_sp_free(void *data, void *ptr)
/* Asynchronous Login/Logout Routines -------------------------------------- */
-static inline unsigned long
+unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
unsigned long tmo;
@@ -429,6 +429,79 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
+int
+qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t idc_major_ver, idc_minor_ver;
+ uint16_t config[4];
+
+ qla83xx_idc_lock(vha, 0);
+
+ /* SV: TODO: Assign initialization timeout from
+ * flash-info / other param
+ */
+ ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
+ ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
+
+ /* Set our fcoe function presence */
+ if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb077,
+ "Error while setting DRV-Presence.\n");
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ }
+
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+
+ /*
+ * On first protocol driver load:
+ * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
+ * register.
+ * Others: Check compatibility with current IDC Major version.
+ */
+ qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
+ if (ha->flags.nic_core_reset_owner) {
+ /* Set IDC Major version */
+ idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
+ qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
+
+ /* Clearing IDC-Lock-Recovery register */
+ qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
+ } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
+ /*
+ * Clear further IDC participation if we are not compatible with
+ * the current IDC Major Version.
+ */
+ ql_log(ql_log_warn, vha, 0xb07d,
+ "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
+ idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
+ __qla83xx_clear_drv_presence(vha);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ }
+ /* Each function sets its supported Minor version. */
+ qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
+ idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
+ qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
+
+ if (ha->flags.nic_core_reset_owner) {
+ memset(config, 0, sizeof(config));
+ if (!qla81xx_get_port_config(vha, config))
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_READY);
+ }
+
+ rval = qla83xx_idc_state_handler(vha);
+
+exit:
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
/*
* qla2x00_initialize_adapter
* Initialize board.
@@ -537,6 +610,14 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
}
+ /* Load the NIC Core f/w if we are the first protocol driver. */
+ if (IS_QLA8031(ha)) {
+ rval = qla83xx_nic_core_fw_load(vha);
+ if (rval)
+ ql_log(ql_log_warn, vha, 0x0124,
+ "Error in initializing NIC Core f/w.\n");
+ }
+
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
@@ -686,7 +767,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
- /* PCIe -- adjust Maximum Read Request Size (2048). */
+ /* PCIe -- adjust Maximum Read Request Size (4096). */
if (pci_is_pcie(ha->pdev))
- pcie_set_readrq(ha->pdev, 2048);
+ pcie_set_readrq(ha->pdev, 4096);
pci_disable_rom(ha->pdev);
@@ -722,7 +803,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
- /* PCIe -- adjust Maximum Read Request Size (2048). */
+ /* PCIe -- adjust Maximum Read Request Size (4096). */
if (pci_is_pcie(ha->pdev))
- pcie_set_readrq(ha->pdev, 2048);
+ pcie_set_readrq(ha->pdev, 4096);
pci_disable_rom(ha->pdev);
@@ -1480,7 +1561,8 @@ enable_82xx_npiv:
"ISP Firmware failed checksum.\n");
goto failed;
}
- }
+ } else
+ goto failed;
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Enable proper parity. */
@@ -1825,7 +1907,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
if (ha->flags.npiv_supported) {
- if (ha->operating_mode == LOOP)
+ if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
}
@@ -2682,11 +2764,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport = NULL;
entries = MAX_FIBRE_DEVICES_LOOP;
- ql_dbg(ql_dbg_disc, vha, 0x2016,
- "Getting FCAL position map.\n");
- if (ql2xextended_error_logging & ql_dbg_disc)
- qla2x00_get_fcal_position_map(vha, NULL);
-
/* Get list of logged in devices. */
memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
@@ -2753,6 +2830,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (loop_id > LAST_LOCAL_LOOP_ID)
continue;
+ memset(new_fcport, 0, sizeof(fc_port_t));
+
/* Fill in member data. */
new_fcport->d_id.b.domain = domain;
new_fcport->d_id.b.area = area;
@@ -3285,7 +3364,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
*/
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
fcport->d_id.b24 = new_fcport->d_id.b24;
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla2x00_clear_loop_id(fcport);
fcport->flags |= (FCF_FABRIC_DEVICE |
FCF_LOGIN_NEEDED);
break;
@@ -3306,7 +3385,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla2x00_clear_loop_id(fcport);
}
break;
@@ -3352,71 +3431,32 @@ int
qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
int rval;
- int found;
- fc_port_t *fcport;
- uint16_t first_loop_id;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp;
- struct scsi_qla_host *tvp;
unsigned long flags = 0;
rval = QLA_SUCCESS;
- /* Save starting loop ID. */
- first_loop_id = dev->loop_id;
-
- for (;;) {
- /* Skip loop ID if already used by adapter. */
- if (dev->loop_id == vha->loop_id)
- dev->loop_id++;
-
- /* Skip reserved loop IDs. */
- while (qla2x00_is_reserved_id(vha, dev->loop_id))
- dev->loop_id++;
-
- /* Reset loop ID if passed the end. */
- if (dev->loop_id > ha->max_loop_id) {
- /* first loop ID. */
- dev->loop_id = ha->min_external_loopid;
- }
-
- /* Check for loop ID being already in use. */
- found = 0;
- fcport = NULL;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- list_for_each_entry(fcport, &vp->vp_fcports, list) {
- if (fcport->loop_id == dev->loop_id &&
- fcport != dev) {
- /* ID possibly in use */
- found++;
- break;
- }
- }
- if (found)
- break;
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->vport_slock, flags);
- /* If not in use then it is free to use. */
- if (!found) {
- ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
- "Assigning new loopid=%x, portid=%x.\n",
- dev->loop_id, dev->d_id.b24);
- break;
- }
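+ /* Grab the lowest free loop ID from the per-HBA bitmap. */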
+ dev->loop_id = find_first_zero_bit(ha->loop_id_map,
+ LOOPID_MAP_SIZE);
+ if (dev->loop_id >= LOOPID_MAP_SIZE ||
+ qla2x00_is_reserved_id(vha, dev->loop_id)) {
+ dev->loop_id = FC_NO_LOOP_ID;
+ rval = QLA_FUNCTION_FAILED;
+ } else
+ set_bit(dev->loop_id, ha->loop_id_map);
- /* ID in use. Try next value. */
- dev->loop_id++;
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
- /* If wrap around. No free ID to use. */
- if (dev->loop_id == first_loop_id) {
- dev->loop_id = FC_NO_LOOP_ID;
- rval = QLA_FUNCTION_FAILED;
- break;
- }
- }
+ if (rval == QLA_SUCCESS)
+ ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+ "Assigning new loopid=%x, portid=%x.\n",
+ dev->loop_id, dev->d_id.b24);
+ else
+ ql_log(ql_log_warn, dev->vha, 0x2087,
+ "No loop_id's available, portid=%x.\n",
+ dev->d_id.b24);
return (rval);
}
@@ -3616,7 +3656,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla2x00_clear_loop_id(fcport);
fcport->login_retry = 0;
rval = 3;
@@ -3775,8 +3815,363 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
+/* Assumes idc_lock always held on entry */
+void
+qla83xx_reset_ownership(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_presence, drv_presence_mask;
+ uint32_t dev_part_info1, dev_part_info2, class_type;
+ uint32_t class_type_mask = 0x3;
+ uint16_t fcoe_other_function = 0xffff, i;
+
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
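+ /* Each DEV_PARTINFO register packs eight functions, 4 bits each;
+ * the low 2 bits of each nibble carry the class type.
+ */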
+ for (i = 0; i < 8; i++) {
+ class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
+ if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+ (i != ha->portnum)) {
+ fcoe_other_function = i;
+ break;
+ }
+ }
+ if (fcoe_other_function == 0xffff) {
+ for (i = 0; i < 8; i++) {
+ class_type = ((dev_part_info2 >> (i * 4)) &
+ class_type_mask);
+ if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+ ((i + 8) != ha->portnum)) {
+ fcoe_other_function = i + 8;
+ break;
+ }
+ }
+ }
+ /*
+ * Prepare drv-presence mask based on fcoe functions present.
+ * However consider only valid physical fcoe function numbers (0-15).
+ */
+ drv_presence_mask = ~((1 << (ha->portnum)) |
+ ((fcoe_other_function == 0xffff) ?
+ 0 : (1 << (fcoe_other_function))));
+
+ /* We are the reset owner iff:
+ * - no other protocol drivers are present and
+ * - this is the lowest of the fcoe functions.
+ */
+ if (!(drv_presence & drv_presence_mask) &&
+ (ha->portnum < fcoe_other_function)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb07f,
+ "This host is Reset owner.\n");
+ ha->flags.nic_core_reset_owner = 1;
+ }
+}
+
+int
+__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_ack;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+ if (rval == QLA_SUCCESS) {
+ drv_ack |= (1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_set_drv_ack(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_set_drv_ack(vha);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+int
+__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_ack;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+ if (rval == QLA_SUCCESS) {
+ drv_ack &= ~(1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_clear_drv_ack(vha);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+const char *
+qla83xx_dev_state_to_string(uint32_t dev_state)
+{
+ switch (dev_state) {
+ case QLA8XXX_DEV_COLD:
+ return "COLD/RE-INIT";
+ case QLA8XXX_DEV_INITIALIZING:
+ return "INITIALIZING";
+ case QLA8XXX_DEV_READY:
+ return "READY";
+ case QLA8XXX_DEV_NEED_RESET:
+ return "NEED RESET";
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ return "NEED QUIESCENT";
+ case QLA8XXX_DEV_FAILED:
+ return "FAILED";
+ case QLA8XXX_DEV_QUIESCENT:
+ return "QUIESCENT";
+ default:
+ return "Unknown";
+ }
+}
+
+/* Assumes idc-lock always held on entry */
+void
+qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t idc_audit_reg = 0, duration_secs = 0;
+
+ switch (audit_type) {
+ case IDC_AUDIT_TIMESTAMP:
+ ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
+ idc_audit_reg = (ha->portnum) |
+ (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
+ qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+ break;
+
+ case IDC_AUDIT_COMPLETION:
+ duration_secs = ((jiffies_to_msecs(jiffies) -
+ jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
+ idc_audit_reg = (ha->portnum) |
+ (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
+ qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+ break;
+
+ default:
+ ql_log(ql_log_warn, vha, 0xb078,
+ "Invalid audit type specified.\n");
+ break;
+ }
+}
+
+/* Assumes idc_lock always held on entry */
+int
+qla83xx_initiating_reset(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t idc_control, dev_state;
+
+ __qla83xx_get_idc_control(vha, &idc_control);
+ if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
+ ql_log(ql_log_info, vha, 0xb080,
+ "NIC Core reset has been disabled. idc-control=0x%x\n",
+ idc_control);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Set NEED-RESET iff in READY state and we are the reset-owner */
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
+ qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+ } else {
+ const char *state = qla83xx_dev_state_to_string(dev_state);
+ ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
+
+ /* SV: XXX: Is timeout required here? */
+ /* Wait for IDC state change READY -> NEED_RESET */
+ while (dev_state == QLA8XXX_DEV_READY) {
+ qla83xx_idc_unlock(vha, 0);
+ msleep(200);
+ qla83xx_idc_lock(vha, 0);
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ }
+ }
+
+ /* Send IDC ack by writing to drv-ack register */
+ __qla83xx_set_drv_ack(vha);
+
+ return QLA_SUCCESS;
+}
+
+int
+__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
+{
+ return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+int
+qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_set_idc_control(vha, idc_control);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+int
+__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
+{
+ return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+int
+qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_get_idc_control(vha, idc_control);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+int
+qla83xx_check_driver_presence(scsi_qla_host_t *vha)
+{
+ uint32_t drv_presence = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if (drv_presence & (1 << ha->portnum))
+ return QLA_SUCCESS;
+ else
+ return QLA_TEST_FAILED;
+}
+
+int
+qla83xx_nic_core_reset(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb058,
+ "Entered %s().\n", __func__);
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_log(ql_log_warn, vha, 0xb059,
+ "Device in unrecoverable FAILED state.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ qla83xx_idc_lock(vha, 0);
+
+ if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb05a,
+ "Function=0x%x has been removed from IDC participation.\n",
+ ha->portnum);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ }
+
+ qla83xx_reset_ownership(vha);
+
+ rval = qla83xx_initiating_reset(vha);
+
+ /*
+ * Perform reset if we are the reset-owner,
+ * else wait till IDC state changes to READY/FAILED.
+ */
+ if (rval == QLA_SUCCESS) {
+ rval = qla83xx_idc_state_handler(vha);
+
+ if (rval == QLA_SUCCESS)
+ ha->flags.nic_core_hung = 0;
+ __qla83xx_clear_drv_ack(vha);
+ }
+
+exit:
+ qla83xx_idc_unlock(vha, 0);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
+
+ return rval;
+}
+
+int
+qla2xxx_mctp_dump(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!IS_MCTP_CAPABLE(ha)) {
+ /* This message can be removed from the final version */
+ ql_log(ql_log_info, vha, 0x506d,
+ "This board is not MCTP capable\n");
+ return rval;
+ }
+
+ if (!ha->mctp_dump) {
+ ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
+ MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
+
+ if (!ha->mctp_dump) {
+ ql_log(ql_log_warn, vha, 0x506e,
+ "Failed to allocate memory for mctp dump\n");
+ return rval;
+ }
+ }
+
+#define MCTP_DUMP_STR_ADDR 0x00000000
+ rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
+ MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x506f,
+ "Failed to capture mctp dump\n");
+ } else {
+ ql_log(ql_log_info, vha, 0x5070,
+ "Mctp dump capture for host (%ld/%p).\n",
+ vha->host_no, ha->mctp_dump);
+ ha->mctp_dumped = 1;
+ }
+
+ if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
+ ha->flags.nic_core_reset_hdlr_active = 1;
+ rval = qla83xx_restart_nic_firmware(vha);
+ if (rval)
+ /* NIC Core reset failed. */
+ ql_log(ql_log_warn, vha, 0x5071,
+ "Failed to restart nic firmware\n");
+ else
+ ql_dbg(ql_dbg_p3p, vha, 0xb084,
+ "Restarted NIC firmware successfully.\n");
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ }
+
+ return rval;
+}
+
/*
-* qla82xx_quiescent_state_cleanup
+* qla2x00_quiesce_io
* Description: This function will block new I/Os.
* It does not abort any I/Os, as the context
* is not destroyed during quiescence.
@@ -3784,20 +4179,20 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
* return : void
*/
void
-qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
+qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
- ql_dbg(ql_dbg_p3p, vha, 0xb002,
- "Performing ISP error recovery - ha=%p.\n", ha);
+ ql_dbg(ql_dbg_dpc, vha, 0x401d,
+ "Quiescing I/O - ha=%p.\n", ha);
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha, 0);
list_for_each_entry(vp, &ha->vp_list, list)
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vp, 0);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@@ -3913,6 +4308,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb05c,
+ "Clearing fcoe driver presence.\n");
+ if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, vha, 0xb073,
+ "Error while clearing DRV-Presence.\n");
+ }
+
if (unlikely(pci_channel_offline(ha->pdev) &&
ha->flags.pci_channel_io_perm_failure)) {
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -4021,6 +4424,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb05d,
+ "Setting back fcoe driver presence.\n");
+ if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, vha, 0xb074,
+ "Error while setting DRV-Presence.\n");
+ }
} else {
ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
__func__);
@@ -5088,6 +5498,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
+ if (IS_T10_PI_CAPABLE(ha))
+ nv->frame_payload_size &= ~7;
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 6e457643c639..c0462c04c885 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -57,6 +57,20 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
return fcp;
}
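+
+/* Pre-set the bits for loop IDs the firmware reserves so that the
+ * loop_id_map allocator in qla2x00_find_new_loop_id() never hands
+ * them out.
+ */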
+static inline void
+qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
+{
+ int i;
+
+ if (IS_FWI2_CAPABLE(ha))
+ return;
+
+ for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+ set_bit(i, ha->loop_id_map);
+ set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+ set_bit(BROADCAST, ha->loop_id_map);
+}
+
static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
@@ -69,6 +83,18 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
}
static inline void
+qla2x00_clear_loop_id(fc_port_t *fcport) {
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+ return;
+
+ clear_bit(fcport->loop_id, ha->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+}
+
+static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
struct dsd_dma *dsd_ptr, *tdsd_ptr;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 70dbf53d9e0f..03b752632839 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -147,13 +147,6 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t guard = scsi_host_get_guard(cmd->device->host);
- /* We only support T10 DIF right now */
- if (guard != SHOST_DIX_GUARD_CRC) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
- "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
- return 0;
- }
-
/* We always use DIFF Bundling for best performance */
*fw_prot_opts = 0;
@@ -172,10 +165,11 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
*fw_prot_opts |= PO_MODE_DIF_REMOVE;
break;
case SCSI_PROT_READ_PASS:
- *fw_prot_opts |= PO_MODE_DIF_PASS;
- break;
case SCSI_PROT_WRITE_PASS:
- *fw_prot_opts |= PO_MODE_DIF_PASS;
+ if (guard & SHOST_DIX_GUARD_IP)
+ *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
+ else
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
break;
default: /* Normal Request */
*fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -821,7 +815,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
unsigned int protcnt)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
switch (scsi_get_prot_type(cmd)) {
case SCSI_PROT_DIF_TYPE0:
@@ -891,12 +884,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
pkt->ref_tag_mask[3] = 0xff;
break;
}
-
- ql_dbg(ql_dbg_io, vha, 0x3009,
- "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
- "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
- pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
- scsi_get_prot_type(cmd), cmd);
}
struct qla2_sgx {
@@ -1068,9 +1055,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int i;
uint16_t used_dsds = tot_dsds;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-
- uint8_t *cp;
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
@@ -1113,19 +1097,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- ql_dbg(ql_dbg_io, vha, 0x300a,
- "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
- i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
+
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
- if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
- cp = page_address(sg_page(sg)) + sg->offset;
- ql_dbg(ql_dbg_io, vha, 0x300b,
- "User data buffer=%p for cmd=%p.\n", cp, cmd);
- }
}
/* Null termination */
*cur_dsd++ = 0;
@@ -1148,8 +1125,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- uint8_t *cp;
cmd = GET_CMD_SP(sp);
scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
@@ -1193,23 +1168,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
- ql_dbg(ql_dbg_io, vha, 0x3027,
- "%s(): %p, sg_entry %d - "
- "addr=0x%x0x%x, len=%d.\n",
- __func__, cur_dsd, i,
- LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
- }
+
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
- if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
- cp = page_address(sg_page(sg)) + sg->offset;
- ql_dbg(ql_dbg_io, vha, 0x3028,
- "%s(): Protection Data buffer = %p.\n", __func__,
- cp);
- }
avail_dsds--;
}
/* Null termination */
@@ -1386,6 +1349,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
if (!qla2x00_hba_err_chk_enabled(sp))
fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+ /* HBA error checking enabled */
+ else if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
+ || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+ SCSI_PROT_DIF_TYPE2))
+ fw_prot_opts |= BIT_10;
+ else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+ SCSI_PROT_DIF_TYPE3)
+ fw_prot_opts |= BIT_11;
+ }
if (!bundling) {
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
@@ -1858,7 +1831,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
}
if (index == MAX_OUTSTANDING_COMMANDS) {
ql_log(ql_log_warn, vha, 0x700b,
- "No room on oustanding cmd array.\n");
+ "No room on outstanding cmd array.\n");
goto queuing_error;
}
@@ -2665,3 +2638,201 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
}
+
+static void
+qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
+ struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ uint32_t req_data_len = 0;
+ uint32_t rsp_data_len = 0;
+ struct scatterlist *sg;
+ int index;
+ int entry_count = 1;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+ /* Update entry type to indicate bidir command */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
+
+ /* Set the transfer direction; for a bidir command both flags are
+ * set. Also set the BD_WRAP_BACK flag: the firmware takes care of
+ * assigning DID=SID for outgoing pkts.
+ */
+ cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+ BD_WRAP_BACK);
+
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
+ cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
+ cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+
+ vha->bidi_stats.transfer_bytes += req_data_len;
+ vha->bidi_stats.io_count++;
+
+ /* Only one dsd is available for bidirectional IOCB, remaining dsds
+ * are bundled in continuation iocb
+ */
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+
+ index = 0;
+
+ for_each_sg(bsg_job->request_payload.sg_list, sg,
+ bsg_job->request_payload.sg_cnt, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets */
+ if (avail_dsds == 0) {
+ /* A continuation type 1 IOCB can accommodate
+ * 5 DSDs
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ entry_count++;
+ }
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+ /* For a read request the DSDs always go to a continuation IOCB
+ * and follow the write DSDs. If there is room on the current IOCB
+ * they are added to that IOCB, else a new continuation IOCB is
+ * allocated.
+ */
+ for_each_sg(bsg_job->reply_payload.sg_list, sg,
+ bsg_job->reply_payload.sg_cnt, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets */
+ if (avail_dsds == 0) {
+ /* A continuation type 1 IOCB can accommodate
+ * 5 DSDs
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ entry_count++;
+ }
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+ /* Must match the number of IOCBs required for this cmd */
+ cmd_pkt->entry_count = entry_count;
+}
+
+int
+qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ uint32_t handle;
+ uint32_t index;
+ uint16_t req_cnt;
+ uint16_t cnt;
+ uint32_t *clr_ptr;
+ struct cmd_bidir *cmd_pkt = NULL;
+ struct rsp_que *rsp;
+ struct req_que *req;
+ int rval = EXT_STATUS_OK;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
+
+ rsp = ha->rsp_q_map[0];
+ req = vha->req;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ return EXT_STATUS_MAILBOX;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == MAX_OUTSTANDING_COMMANDS) {
+ rval = EXT_STATUS_BUSY;
+ goto queuing_error;
+ }
+
+ /* Calculate the number of IOCBs required */
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ /* Check for room on request queue. */
+ if (req->cnt < req_cnt + 2) {
+ if (ha->mqenable)
+ cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ else if (IS_QLA82XX(ha))
+ cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+ else if (IS_FWI2_CAPABLE(ha))
+ cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ else
+ cnt = qla2x00_debounce_register(
+ ISP_REQ_Q_OUT(ha, &reg->isp));
+
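+		/* Free slots: distance from our producer index to the
+		 * consumer index just read, modulo the ring length.
+		 */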
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < req_cnt + 2) {
+ rval = EXT_STATUS_BUSY;
+ goto queuing_error;
+ }
+
+ cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+	/* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	/* Set the N_Port ID (of this vha). */
+ cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
+ cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = vha->d_id.b.area;
+ cmd_pkt->port_id[2] = vha->d_id.b.domain;
+
+ qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+	/* Bookkeeping: claim the handle and charge the ring count. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ req->cnt -= req_cnt;
+
+ /* Send the command to the firmware */
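+	/* wmb() orders the IOCB writes before the doorbell write in
+	 * qla2x00_start_iocbs().
+	 */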
+ wmb();
+ qla2x00_start_iocbs(vha, req);
+queuing_error:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6f67a9d4998b..5733811ce8e7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -294,6 +294,11 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
"%04x %04x %04x %04x %04x %04x %04x.\n",
event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
mb[4], mb[5], mb[6]);
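+	/* mb[1] bit 15 set on IDC-complete: record the completion status
+	 * and wake any waiter blocked on DCBX completion.
+	 */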
+	if (aen == MBA_IDC_COMPLETE && (mb[1] >> 15)) {
+ vha->hw->flags.idc_compl_status = 1;
+ if (vha->hw->notify_dcbx_comp)
+ complete(&vha->hw->dcbx_comp);
+ }
/* Acknowledgement needed? [Notify && non-zero timeout]. */
timeout = (descr >> 8) & 0xf;
@@ -332,6 +337,166 @@ qla2x00_get_link_speed_str(struct qla_hw_data *ha)
return link_speed;
}
+void
+qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /*
+ * 8200 AEN Interpretation:
+ * mb[0] = AEN code
+ * mb[1] = AEN Reason code
+ * mb[2] = LSW of Peg-Halt Status-1 Register
+ * mb[6] = MSW of Peg-Halt Status-1 Register
+ * mb[3] = LSW of Peg-Halt Status-2 register
+ * mb[7] = MSW of Peg-Halt Status-2 register
+ * mb[4] = IDC Device-State Register value
+ * mb[5] = IDC Driver-Presence Register value
+ */
+ ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
+ "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
+ mb[0], mb[1], mb[2], mb[6]);
+ ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
+ "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
+ "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
+
+ if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
+ IDC_HEARTBEAT_FAILURE)) {
+ ha->flags.nic_core_hung = 1;
+ ql_log(ql_log_warn, vha, 0x5060,
+ "83XX: F/W Error Reported: Check if reset required.\n");
+
+ if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
+ uint32_t protocol_engine_id, fw_err_code, err_level;
+
+ /*
+ * IDC_PEG_HALT_STATUS_CHANGE interpretation:
+ * - PEG-Halt Status-1 Register:
+ * (LSW = mb[2], MSW = mb[6])
+ * Bits 0-7 = protocol-engine ID
+ * Bits 8-28 = f/w error code
+ * Bits 29-31 = Error-level
+ * Error-level 0x1 = Non-Fatal error
+ * Error-level 0x2 = Recoverable Fatal error
+ * Error-level 0x4 = UnRecoverable Fatal error
+ * - PEG-Halt Status-2 Register:
+ * (LSW = mb[3], MSW = mb[7])
+ */
+ protocol_engine_id = (mb[2] & 0xff);
+ fw_err_code = (((mb[2] & 0xff00) >> 8) |
+ ((mb[6] & 0x1fff) << 8));
+ err_level = ((mb[6] & 0xe000) >> 13);
+ ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
+ "Register: protocol_engine_id=0x%x "
+ "fw_err_code=0x%x err_level=0x%x.\n",
+ protocol_engine_id, fw_err_code, err_level);
+ ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
+			    "Register: 0x%04x%04x.\n", mb[7], mb[3]);
+ if (err_level == ERR_LEVEL_NON_FATAL) {
+ ql_log(ql_log_warn, vha, 0x5063,
+				    "Not a fatal error, f/w has recovered "
+				    "itself.\n");
+ } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
+ ql_log(ql_log_fatal, vha, 0x5064,
+ "Recoverable Fatal error: Chip reset "
+ "required.\n");
+ qla83xx_schedule_work(vha,
+ QLA83XX_NIC_CORE_RESET);
+ } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
+ ql_log(ql_log_fatal, vha, 0x5065,
+ "Unrecoverable Fatal error: Set FAILED "
+ "state, reboot required.\n");
+ qla83xx_schedule_work(vha,
+ QLA83XX_NIC_CORE_UNRECOVERABLE);
+ }
+ }
+
+ if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
+ uint16_t peg_fw_state, nw_interface_link_up;
+ uint16_t nw_interface_signal_detect, sfp_status;
+ uint16_t htbt_counter, htbt_monitor_enable;
+			uint16_t sfp_additional_info, sfp_multirate;
+ uint16_t sfp_tx_fault, link_speed, dcbx_status;
+
+ /*
+ * IDC_NIC_FW_REPORTED_FAILURE interpretation:
+ * - PEG-to-FC Status Register:
+ * (LSW = mb[2], MSW = mb[6])
+ * Bits 0-7 = Peg-Firmware state
+ * Bit 8 = N/W Interface Link-up
+ * Bit 9 = N/W Interface signal detected
+ * Bits 10-11 = SFP Status
+ * SFP Status 0x0 = SFP+ transceiver not expected
+ * SFP Status 0x1 = SFP+ transceiver not present
+ * SFP Status 0x2 = SFP+ transceiver invalid
+ * SFP Status 0x3 = SFP+ transceiver present and
+ * valid
+ * Bits 12-14 = Heartbeat Counter
+ * Bit 15 = Heartbeat Monitor Enable
+ * Bits 16-17 = SFP Additional Info
+			 *	SFP info 0x0 = Unrecognized transceiver for
+ * Ethernet
+ * SFP info 0x1 = SFP+ brand validation failed
+ * SFP info 0x2 = SFP+ speed validation failed
+ * SFP info 0x3 = SFP+ access error
+ * Bit 18 = SFP Multirate
+ * Bit 19 = SFP Tx Fault
+ * Bits 20-22 = Link Speed
+ * Bits 23-27 = Reserved
+ * Bits 28-30 = DCBX Status
+ * DCBX Status 0x0 = DCBX Disabled
+ * DCBX Status 0x1 = DCBX Enabled
+ * DCBX Status 0x2 = DCBX Exchange error
+ * Bit 31 = Reserved
+ */
+ peg_fw_state = (mb[2] & 0x00ff);
+ nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
+ nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
+ sfp_status = ((mb[2] & 0x0c00) >> 10);
+ htbt_counter = ((mb[2] & 0x7000) >> 12);
+ htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
+			sfp_additional_info = (mb[6] & 0x0003);
+ sfp_multirate = ((mb[6] & 0x0004) >> 2);
+ sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
+ link_speed = ((mb[6] & 0x0070) >> 4);
+ dcbx_status = ((mb[6] & 0x7000) >> 12);
+
+ ql_log(ql_log_warn, vha, 0x5066,
+			    "Peg-to-FC Status Register: peg_fw_state=0x%x, "
+			    "nw_interface_link_up=0x%x, "
+			    "nw_interface_signal_detect=0x%x, "
+			    "sfp_status=0x%x.\n", peg_fw_state,
+			    nw_interface_link_up, nw_interface_signal_detect,
+			    sfp_status);
+ ql_log(ql_log_warn, vha, 0x5067,
+ "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
+			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
+			    htbt_counter, htbt_monitor_enable,
+			    sfp_additional_info, sfp_multirate);
+ ql_log(ql_log_warn, vha, 0x5068,
+			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
+ "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
+ dcbx_status);
+
+ qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+ }
+
+ if (mb[1] & IDC_HEARTBEAT_FAILURE) {
+ ql_log(ql_log_warn, vha, 0x5069,
+ "Heartbeat Failure encountered, chip reset "
+ "required.\n");
+
+ qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+ }
+ }
+
+ if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
+ ql_log(ql_log_info, vha, 0x506a,
+ "IDC Device-State changed = 0x%x.\n", mb[4]);
+ qla83xx_schedule_work(vha, MBA_IDC_AEN);
+ }
+}
+
/**
 * qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -681,8 +846,7 @@ skip_rio:
* it. Otherwise ignore it and Wait for RSCN to come in.
*/
atomic_set(&vha->loop_down_timer, 0);
- if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
- atomic_read(&vha->loop_state) != LOOP_DEAD) {
+ if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
@@ -822,11 +986,28 @@ skip_rio:
"FCF Configuration Error -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
break;
- case MBA_IDC_COMPLETE:
case MBA_IDC_NOTIFY:
+ /* See if we need to quiesce any I/O */
+ if (IS_QLA8031(vha->hw))
+ if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+ (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+ set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
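+		/* Fall through: the notify is also processed as an IDC event */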
+ case MBA_IDC_COMPLETE:
case MBA_IDC_TIME_EXT:
- qla81xx_idc_event(vha, mb[0], mb[1]);
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
+ qla81xx_idc_event(vha, mb[0], mb[1]);
break;
+
+ case MBA_IDC_AEN:
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[5] = RD_REG_WORD(&reg24->mailbox5);
+ mb[6] = RD_REG_WORD(&reg24->mailbox6);
+ mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ qla83xx_handle_8200_aen(vha, mb);
+ break;
+
default:
ql_dbg(ql_dbg_async, vha, 0x5057,
"Unknown AEN:%04x %04x %04x %04x\n",
@@ -1414,7 +1595,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
struct scsi_dif_tuple {
__be16 guard; /* Checksum */
- __be16 app_tag; /* APPL identifer */
+ __be16 app_tag; /* APPL identifier */
__be32 ref_tag; /* Target LBA or indirect LBA */
};
@@ -1546,6 +1727,149 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
return 1;
}
+static void
+qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+ struct req_que *req, uint32_t index)
+{
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ uint16_t comp_status;
+ uint16_t scsi_status;
+ uint16_t thread_id;
+ uint32_t rval = EXT_STATUS_OK;
+ struct fc_bsg_job *bsg_job = NULL;
+ sts_entry_t *sts;
+ struct sts_entry_24xx *sts24;
+ sts = (sts_entry_t *) pkt;
+ sts24 = (struct sts_entry_24xx *) pkt;
+
+ /* Validate handle. */
+ if (index >= MAX_OUTSTANDING_COMMANDS) {
+ ql_log(ql_log_warn, vha, 0x70af,
+ "Invalid SCSI completion handle 0x%x.\n", index);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp = req->outstanding_cmds[index];
+ if (sp) {
+ /* Free outstanding command slot. */
+ req->outstanding_cmds[index] = NULL;
+ bsg_job = sp->u.bsg_job;
+ } else {
+ ql_log(ql_log_warn, vha, 0x70b0,
+ "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
+ req->id, index);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ comp_status = le16_to_cpu(sts24->comp_status);
+ scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+ } else {
+ comp_status = le16_to_cpu(sts->comp_status);
+ scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+ }
+
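+	/* vendor_cmd[1] carries the caller's thread id; it is used here
+	 * only in trace messages.
+	 */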
+ thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ switch (comp_status) {
+ case CS_COMPLETE:
+ if (scsi_status == 0) {
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ rval = EXT_STATUS_OK;
+ }
+ goto done;
+
+ case CS_DATA_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b1,
+		    "Command completed with data overrun thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_DATA_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b2,
+		    "Command completed with data underrun thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+ case CS_BIDIR_RD_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b3,
+ "Command completed with read data overrun thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_BIDIR_RD_WR_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b4,
+ "Command completed with read and write data overrun "
+ "thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b5,
+ "Command completed with read data over and write data "
+ "underrun thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_BIDIR_RD_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b6,
+		    "Command completed with read data underrun "
+ "thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+
+ case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b7,
+ "Command completed with read data under and write data "
+ "overrun thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+
+ case CS_BIDIR_RD_WR_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b8,
+ "Command completed with read and write data underrun "
+ "thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+
+ case CS_BIDIR_DMA:
+ ql_dbg(ql_dbg_user, vha, 0x70b9,
+ "Command completed with data DMA error thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DMA_ERR;
+ break;
+
+ case CS_TIMEOUT:
+ ql_dbg(ql_dbg_user, vha, 0x70ba,
+ "Command completed with timeout thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_TIMEOUT;
+ break;
+ default:
+ ql_dbg(ql_dbg_user, vha, 0x70bb,
+ "Command completed with completion status=0x%x "
+ "thread_id=%d\n", comp_status, thread_id);
+ rval = EXT_STATUS_ERR;
+ break;
+ }
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+done:
+ /* Return the vendor specific reply to API */
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	/* Always return DID_OK; bsg delivers the vendor-specific response
+	 * in this case.
+	 */
+	sp->done(vha, sp, (DID_OK << 16));
+}
+
/**
* qla2x00_status_entry() - Process a Status IOCB entry.
* @ha: SCSI driver HA context
@@ -1573,12 +1897,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
struct req_que *req;
int logit = 1;
int res = 0;
+ uint16_t state_flags = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+ state_flags = le16_to_cpu(sts24->state_flags);
} else {
comp_status = le16_to_cpu(sts->comp_status);
scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
@@ -1587,17 +1913,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
que = MSW(sts->handle);
req = ha->req_q_map[que];
- /* Fast path completion. */
- if (comp_status == CS_COMPLETE && scsi_status == 0) {
- qla2x00_process_completed_request(vha, req, handle);
-
- return;
- }
-
/* Validate handle. */
if (handle < MAX_OUTSTANDING_COMMANDS) {
sp = req->outstanding_cmds[handle];
- req->outstanding_cmds[handle] = NULL;
} else
sp = NULL;
@@ -1612,6 +1930,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
qla2xxx_wake_dpc(vha);
return;
}
+
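+	/* state_flags BIT_1 marks a bidirectional command completion, which
+	 * has its own status layout; hand it off before the fast path.
+	 */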
+ if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
+ qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
+ return;
+ }
+
+ /* Fast path completion. */
+ if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_process_completed_request(vha, req, handle);
+
+ return;
+ }
+
+ req->outstanding_cmds[handle] = NULL;
cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_dbg(ql_dbg_io, vha, 0x3018,
@@ -1830,7 +2162,21 @@ check_scsi_status:
case CS_DIF_ERROR:
logit = qla2x00_handle_dif_error(sp, sts24);
+ res = cp->result;
break;
+
+ case CS_TRANSPORT:
+ res = DID_ERROR << 16;
+
+ if (!IS_PI_SPLIT_DET_CAPABLE(ha))
+ break;
+
+ if (state_flags & BIT_4)
+ scmd_printk(KERN_WARNING, cp,
+ "Unsupported device '%s' found.\n",
+ cp->device->vendor);
+ break;
+
default:
res = DID_ERROR << 16;
break;
@@ -2150,7 +2496,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
unsigned long iter;
uint32_t stat;
uint32_t hccr;
- uint16_t mb[4];
+ uint16_t mb[8];
struct rsp_que *rsp;
unsigned long flags;
@@ -2191,29 +2537,29 @@ qla24xx_intr_handler(int irq, void *dev_id)
break;
switch (stat & 0xff) {
- case 0x1:
- case 0x2:
- case 0x10:
- case 0x11:
+ case INTR_ROM_MB_SUCCESS:
+ case INTR_ROM_MB_FAILED:
+ case INTR_MB_SUCCESS:
+ case INTR_MB_FAILED:
qla24xx_mbx_completion(vha, MSW(stat));
status |= MBX_INTERRUPT;
break;
- case 0x12:
+ case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
mb[1] = RD_REG_WORD(&reg->mailbox1);
mb[2] = RD_REG_WORD(&reg->mailbox2);
mb[3] = RD_REG_WORD(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
- case 0x13:
- case 0x14:
+ case INTR_RSP_QUE_UPDATE:
+ case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
- case 0x1C: /* ATIO queue updated */
+ case INTR_ATIO_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
break;
- case 0x1D: /* ATIO and response queues updated */
+ case INTR_ATIO_RSP_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
qla24xx_process_response_queue(vha, rsp);
break;
@@ -2224,6 +2570,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
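+		/* ISP83xx rev 1 needs a short delay after clearing the
+		 * RISC interrupt.
+		 */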
+ if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
+ ndelay(3500);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2306,7 +2654,7 @@ qla24xx_msix_default(int irq, void *dev_id)
int status;
uint32_t stat;
uint32_t hccr;
- uint16_t mb[4];
+ uint16_t mb[8];
unsigned long flags;
rsp = (struct rsp_que *) dev_id;
@@ -2342,29 +2690,29 @@ qla24xx_msix_default(int irq, void *dev_id)
break;
switch (stat & 0xff) {
- case 0x1:
- case 0x2:
- case 0x10:
- case 0x11:
+ case INTR_ROM_MB_SUCCESS:
+ case INTR_ROM_MB_FAILED:
+ case INTR_MB_SUCCESS:
+ case INTR_MB_FAILED:
qla24xx_mbx_completion(vha, MSW(stat));
status |= MBX_INTERRUPT;
break;
- case 0x12:
+ case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
mb[1] = RD_REG_WORD(&reg->mailbox1);
mb[2] = RD_REG_WORD(&reg->mailbox2);
mb[3] = RD_REG_WORD(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
- case 0x13:
- case 0x14:
+ case INTR_RSP_QUE_UPDATE:
+ case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
- case 0x1C: /* ATIO queue updated */
+ case INTR_ATIO_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
break;
- case 0x1D: /* ATIO and response queues updated */
+ case INTR_ATIO_RSP_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
qla24xx_process_response_queue(vha, rsp);
break;
@@ -2570,7 +2918,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_QLA8001(ha))
+ !IS_QLA8001(ha) && !IS_QLA82XX(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
@@ -2581,6 +2929,11 @@ skip_msix:
} else
ql_log(ql_log_warn, vha, 0x0039,
"MSI-X; Falling back-to INTa mode -- %d.\n", ret);
+
+ /* Skip INTx on ISP82xx. */
+ if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2595,21 +2948,9 @@ skip_msi:
clear_risc_ints:
- /*
- * FIXME: Noted that 8014s were being dropped during NK testing.
- * Timing deltas during MSI-X/INTa transitions?
- */
- if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
- goto fail;
spin_lock_irq(&ha->hardware_lock);
- if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
- } else {
+ if (!IS_FWI2_CAPABLE(ha))
WRT_REG_WORD(&reg->isp.semaphore, 0);
- WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
- WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
- }
spin_unlock_irq(&ha->hardware_lock);
fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d5ce92c0a8fc..18c509fae555 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- if (ha->flags.isp82xx_fw_hung) {
+ if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, vha, 0x1004,
@@ -232,7 +232,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- if (ha->flags.isp82xx_fw_hung) {
+	if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
ha->flags.mbox_busy = 0;
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -369,7 +369,7 @@ premature_exit:
mbx_done:
if (rval) {
- ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
+ ql_log(ql_log_warn, base_vha, 0x1020,
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
} else {
@@ -533,7 +533,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
- if (IS_QLA83XX(vha->hw))
+ if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
@@ -559,18 +559,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->phy_version[1] = mcp->mb[9] >> 8;
ha->phy_version[2] = mcp->mb[9] & 0xff;
}
- if (IS_QLA83XX(ha)) {
- if (mcp->mb[6] & BIT_15) {
- ha->fw_attributes_h = mcp->mb[15];
- ha->fw_attributes_ext[0] = mcp->mb[16];
- ha->fw_attributes_ext[1] = mcp->mb[17];
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
- "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
- __func__, mcp->mb[15], mcp->mb[6]);
- } else
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
- "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
- __func__, mcp->mb[6]);
+ if (IS_FWI2_CAPABLE(ha)) {
+ ha->fw_attributes_h = mcp->mb[15];
+ ha->fw_attributes_ext[0] = mcp->mb[16];
+ ha->fw_attributes_ext[1] = mcp->mb[17];
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
+ "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
+ __func__, mcp->mb[15], mcp->mb[6]);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
+ "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
+ __func__, mcp->mb[17], mcp->mb[16]);
}
failed:
@@ -3408,7 +3406,6 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
return rval;
}
-
/* 84XX Support **************************************************************/
struct cs84xx_mgmt_cmd {
@@ -4428,7 +4425,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
"Entered %s.\n", __func__);
/* Integer part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
+ rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
+ BIT_13|BIT_12|BIT_0);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
ha->flags.thermal_supported = 0;
@@ -4437,7 +4435,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
*temp = byte;
/* Fraction part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
+ rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
+ BIT_13|BIT_12|BIT_0);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
ha->flags.thermal_supported = 0;
@@ -4741,7 +4740,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
}
int
-qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
{
int rval;
struct qla_hw_data *ha = vha->hw;
@@ -4814,3 +4813,186 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
return rval;
}
+int
+qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long retry_max_time = jiffies + (2 * HZ);
+
+ if (!IS_QLA83XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
+
+retry_rd_reg:
+ mcp->mb[0] = MBC_READ_REMOTE_REG;
+ mcp->mb[1] = LSW(reg);
+ mcp->mb[2] = MSW(reg);
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x114c,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ *data = (mcp->mb[3] | (mcp->mb[4] << 16));
+ if (*data == QLA8XXX_BAD_VALUE) {
+ /*
+ * During soft-reset CAMRAM register reads might
+ * return 0xbad0bad0. So retry for MAX of 2 sec
+ * while reading camram registers.
+ */
+ if (time_after(jiffies, retry_max_time)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1141,
+ "Failure to read CAMRAM register. "
+ "data=0x%x.\n", *data);
+ return QLA_FUNCTION_FAILED;
+ }
+ msleep(100);
+ goto retry_rd_reg;
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA83XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1144,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ ha->isp_ops->fw_dump(vha, 0);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
+ uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint8_t subcode = (uint8_t)options;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
+ mcp->mb[1] = options;
+ mcp->out_mb = MBX_1|MBX_0;
+ if (subcode & BIT_2) {
+ mcp->mb[2] = LSW(start_addr);
+ mcp->mb[3] = MSW(start_addr);
+ mcp->mb[4] = LSW(end_addr);
+ mcp->mb[5] = MSW(end_addr);
+ mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
+ }
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
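+	/* Sub-codes other than BIT_2 and BIT_5 also return a lock id
+	 * in mb[3]/mb[4].
+	 */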
+ if (!(subcode & (BIT_2 | BIT_5)))
+ mcp->in_mb |= MBX_4|MBX_3;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1147,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+ mcp->mb[4]);
+ ha->isp_ops->fw_dump(vha, 0);
+ } else {
+ if (subcode & BIT_5)
+ *sector_size = mcp->mb[1];
+ else if (subcode & (BIT_6 | BIT_7)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1148,
+			    "Driver-lock id=%04x%04x.\n", mcp->mb[4], mcp->mb[3]);
+ } else if (subcode & (BIT_3 | BIT_4)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1149,
+			    "Flash-lock id=%04x%04x.\n", mcp->mb[4], mcp->mb[3]);
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+ uint32_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_MCTP_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+ mcp->mb[1] = LSW(addr);
+ mcp->mb[2] = MSW(req_dma);
+ mcp->mb[3] = LSW(req_dma);
+ mcp->mb[4] = MSW(size);
+ mcp->mb[5] = LSW(size);
+ mcp->mb[6] = MSW(MSD(req_dma));
+ mcp->mb[7] = LSW(MSD(req_dma));
+ mcp->mb[8] = MSW(addr);
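+	/* req_dma: mb[3]/mb[2] hold the low 32 bits, mb[7]/mb[6] the high
+	 * 32 bits; the RAM address is split across mb[1] and mb[8].
+	 */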
+	/* Set the RAM-ID-valid bit; assign (not OR) since mc lives on the
+	 * stack and mb[10] starts uninitialized.
+	 */
+	mcp->mb[10] = BIT_7;
+	/* For MCTP the RAM ID is 0x40 */
+	mcp->mb[10] |= 0x40;
+
+	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
+ MBX_0;
+
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x114e,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 3e8b32419e68..bd4708a422cd 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -476,7 +476,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->req = base_vha->req;
host->can_queue = base_vha->req->length + 128;
- host->this_id = 255;
host->cmd_per_lun = 3;
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
@@ -643,7 +642,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
&req->dma, GFP_KERNEL);
if (req->ring == NULL) {
ql_log(ql_log_fatal, base_vha, 0x00da,
- "Failed to allocte memory for request_ring.\n");
+ "Failed to allocate memory for request_ring.\n");
goto que_failed;
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 9ce3a8f8754f..7807444418ff 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1612,25 +1612,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
}
/* PCI related functions */
-char *
-qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
-{
- int pcie_reg;
- struct qla_hw_data *ha = vha->hw;
- char lwstr[6];
- uint16_t lnk;
-
- pcie_reg = pci_pcie_cap(ha->pdev);
- pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
- ha->link_width = (lnk >> 4) & 0x3f;
-
- strcpy(str, "PCIe (");
- strcat(str, "2.5Gb/s ");
- snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
- strcat(str, lwstr);
- return str;
-}
-
int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
{
unsigned long val = 0;
@@ -2322,6 +2303,29 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
}
inline void
+qla82xx_set_idc_version(scsi_qla_host_t *vha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
+ QLA82XX_IDC_VERSION);
+ ql_log(ql_log_info, vha, 0xb082,
+ "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
+ } else {
+ idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
+ if (idc_ver != QLA82XX_IDC_VERSION)
+ ql_log(ql_log_info, vha, 0xb083,
+ "qla2xxx driver IDC version %d is not compatible "
+ "with IDC version %d of the other drivers\n",
+ QLA82XX_IDC_VERSION, idc_ver);
+ }
+}
+
+inline void
qla82xx_set_drv_active(scsi_qla_host_t *vha)
{
uint32_t drv_active;
@@ -2355,7 +2359,7 @@ qla82xx_need_reset(struct qla_hw_data *ha)
uint32_t drv_state;
int rval;
- if (ha->flags.isp82xx_reset_owner)
+ if (ha->flags.nic_core_reset_owner)
return 1;
else {
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
@@ -2864,7 +2868,7 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
timeout = msleep_interruptible(200);
if (timeout) {
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
+ QLA8XXX_DEV_FAILED);
return QLA_FUNCTION_FAILED;
}
@@ -2895,10 +2899,7 @@ dev_initialize:
/* set to DEV_INITIALIZING */
ql_log(ql_log_info, vha, 0x009e,
"HW State: INITIALIZING.\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
-
- /* Driver that sets device state to initializating sets IDC version */
- qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
qla82xx_idc_unlock(ha);
rval = qla82xx_start_firmware(vha);
@@ -2908,14 +2909,14 @@ dev_initialize:
ql_log(ql_log_fatal, vha, 0x00ad,
"HW State: FAILED.\n");
qla82xx_clear_drv_active(ha);
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
return rval;
}
dev_ready:
ql_log(ql_log_info, vha, 0x00ae,
"HW State: READY.\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
return QLA_SUCCESS;
}
@@ -2939,7 +2940,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
if (vha->flags.online) {
/*Block any further I/O and wait for pending cmnds to complete*/
- qla82xx_quiescent_state_cleanup(vha);
+ qla2x00_quiesce_io(vha);
}
/* Set the quiescence ready bit */
@@ -2964,7 +2965,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
"DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
drv_active, drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_READY);
+ QLA8XXX_DEV_READY);
ql_log(ql_log_info, vha, 0xb025,
"HW State: DEV_READY.\n");
qla82xx_idc_unlock(ha);
@@ -2985,10 +2986,10 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
}
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
/* everyone acked so set the state to DEV_QUIESCENCE */
- if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
+ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
ql_log(ql_log_info, vha, 0xb026,
"HW State: DEV_QUIESCENT.\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
}
}
@@ -3018,8 +3019,8 @@ qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
return dev_state;
}
-static void
-qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
+void
+qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -3027,9 +3028,10 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
ql_log(ql_log_fatal, vha, 0x00b8,
"Disabling the board.\n");
- qla82xx_idc_lock(ha);
- qla82xx_clear_drv_active(ha);
- qla82xx_idc_unlock(ha);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ }
/* Set DEV_FAILED flag to disable timer */
vha->device_flags |= DFLG_DEV_FAILED;
@@ -3068,7 +3070,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- if (!ha->flags.isp82xx_reset_owner) {
+ if (!ha->flags.nic_core_reset_owner) {
ql_dbg(ql_dbg_p3p, vha, 0xb028,
"reset_acknowledged by 0x%x\n", ha->portnum);
qla82xx_set_rst_ready(ha);
@@ -3080,7 +3082,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
/* wait for 10 seconds for reset ack from all functions */
- reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
@@ -3092,7 +3094,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
drv_state, drv_active, dev_state, active_mask);
while (drv_state != drv_active &&
- dev_state != QLA82XX_DEV_INITIALIZING) {
+ dev_state != QLA8XXX_DEV_INITIALIZING) {
if (time_after_eq(jiffies, reset_timeout)) {
ql_log(ql_log_warn, vha, 0x00b5,
"Reset timeout.\n");
@@ -3103,7 +3105,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
qla82xx_idc_lock(ha);
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- if (ha->flags.isp82xx_reset_owner)
+ if (ha->flags.nic_core_reset_owner)
drv_active &= active_mask;
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
}
@@ -3119,11 +3121,11 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
- if (dev_state != QLA82XX_DEV_INITIALIZING &&
- dev_state != QLA82XX_DEV_COLD) {
+ if (dev_state != QLA8XXX_DEV_INITIALIZING &&
+ dev_state != QLA8XXX_DEV_COLD) {
ql_log(ql_log_info, vha, 0x00b7,
"HW State: COLD/RE-INIT.\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
qla82xx_set_rst_ready(ha);
if (ql2xmdenable) {
if (qla82xx_md_collect(vha))
@@ -3230,8 +3232,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
int loopcount = 0;
qla82xx_idc_lock(ha);
- if (!vha->flags.init_done)
+ if (!vha->flags.init_done) {
qla82xx_set_drv_active(vha);
+ qla82xx_set_idc_version(vha);
+ }
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
old_dev_state = dev_state;
@@ -3241,7 +3245,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
/* wait for 30 seconds for device to go ready */
- dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
while (1) {
@@ -3265,18 +3269,18 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
}
switch (dev_state) {
- case QLA82XX_DEV_READY:
- ha->flags.isp82xx_reset_owner = 0;
- goto exit;
- case QLA82XX_DEV_COLD:
+ case QLA8XXX_DEV_READY:
+ ha->flags.nic_core_reset_owner = 0;
+ goto rel_lock;
+ case QLA8XXX_DEV_COLD:
rval = qla82xx_device_bootstrap(vha);
break;
- case QLA82XX_DEV_INITIALIZING:
+ case QLA8XXX_DEV_INITIALIZING:
qla82xx_idc_unlock(ha);
msleep(1000);
qla82xx_idc_lock(ha);
break;
- case QLA82XX_DEV_NEED_RESET:
+ case QLA8XXX_DEV_NEED_RESET:
if (!ql2xdontresethba)
qla82xx_need_reset_handler(vha);
else {
@@ -3285,31 +3289,31 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
qla82xx_idc_lock(ha);
}
dev_init_timeout = jiffies +
- (ha->nx_dev_init_timeout * HZ);
+ (ha->fcoe_dev_init_timeout * HZ);
break;
- case QLA82XX_DEV_NEED_QUIESCENT:
+ case QLA8XXX_DEV_NEED_QUIESCENT:
qla82xx_need_qsnt_handler(vha);
/* Reset timeout value after quiescence handler */
- dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
+			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
* HZ);
break;
- case QLA82XX_DEV_QUIESCENT:
+ case QLA8XXX_DEV_QUIESCENT:
/* Owner will exit and other will wait for the state
* to get changed
*/
if (ha->flags.quiesce_owner)
- goto exit;
+ goto rel_lock;
qla82xx_idc_unlock(ha);
msleep(1000);
qla82xx_idc_lock(ha);
/* Reset timeout value after quiescence handler */
- dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
+			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
* HZ);
break;
- case QLA82XX_DEV_FAILED:
- qla82xx_dev_failed_handler(vha);
+ case QLA8XXX_DEV_FAILED:
+ qla8xxx_dev_failed_handler(vha);
rval = QLA_FUNCTION_FAILED;
goto exit;
default:
@@ -3319,8 +3323,9 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
}
loopcount++;
}
-exit:
+rel_lock:
qla82xx_idc_unlock(ha);
+exit:
return rval;
}
@@ -3368,22 +3373,30 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
/* don't poll if reset is going on */
- if (!ha->flags.isp82xx_reset_hdlr_active) {
+ if (!ha->flags.nic_core_reset_hdlr_active) {
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (qla82xx_check_temp(vha)) {
set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
ha->flags.isp82xx_fw_hung = 1;
qla82xx_clear_pending_mbx(vha);
- } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6001,
"Adapter reset needed.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
+ } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6002,
"Quiescent needed.\n");
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ } else if (dev_state == QLA8XXX_DEV_FAILED &&
+ !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
+ vha->flags.online == 1) {
+ ql_log(ql_log_warn, vha, 0xb055,
+ "Adapter state is failed. Offlining.\n");
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla82xx_clear_pending_mbx(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3445,12 +3458,12 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
uint32_t dev_state;
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_READY) {
+ if (dev_state == QLA8XXX_DEV_READY) {
ql_log(ql_log_info, vha, 0xb02f,
"HW State: NEED RESET\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_NEED_RESET);
- ha->flags.isp82xx_reset_owner = 1;
+ QLA8XXX_DEV_NEED_RESET);
+ ha->flags.nic_core_reset_owner = 1;
ql_dbg(ql_dbg_p3p, vha, 0xb030,
"reset_owner is 0x%x\n", ha->portnum);
} else
@@ -3481,7 +3494,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
"Device in failed state, exiting.\n");
return QLA_SUCCESS;
}
- ha->flags.isp82xx_reset_hdlr_active = 1;
+ ha->flags.nic_core_reset_hdlr_active = 1;
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
@@ -3495,7 +3508,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
ha->flags.isp82xx_fw_hung = 0;
- ha->flags.isp82xx_reset_hdlr_active = 0;
+ ha->flags.nic_core_reset_hdlr_active = 0;
qla82xx_restart_isp(vha);
}
@@ -4030,7 +4043,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
if (r_addr & 0xf) {
ql_log(ql_log_warn, vha, 0xb033,
- "Read addr 0x%x not 16 bytes alligned\n", r_addr);
+ "Read addr 0x%x not 16 bytes aligned\n", r_addr);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6eb210e3cc63..6c953e8c08f0 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -542,14 +542,15 @@
#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
/* Every driver should use these Device State */
-#define QLA82XX_DEV_COLD 1
-#define QLA82XX_DEV_INITIALIZING 2
-#define QLA82XX_DEV_READY 3
-#define QLA82XX_DEV_NEED_RESET 4
-#define QLA82XX_DEV_NEED_QUIESCENT 5
-#define QLA82XX_DEV_FAILED 6
-#define QLA82XX_DEV_QUIESCENT 7
+#define QLA8XXX_DEV_COLD 1
+#define QLA8XXX_DEV_INITIALIZING 2
+#define QLA8XXX_DEV_READY 3
+#define QLA8XXX_DEV_NEED_RESET 4
+#define QLA8XXX_DEV_NEED_QUIESCENT 5
+#define QLA8XXX_DEV_FAILED 6
+#define QLA8XXX_DEV_QUIESCENT 7
#define MAX_STATES 8 /* Increment if new state added */
+#define QLA8XXX_BAD_VALUE 0xbad0bad0
#define QLA82XX_IDC_VERSION 1
#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fb8cd3847d4b..c7dd29876836 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -113,11 +113,11 @@ MODULE_PARM_DESC(ql2xfdmienable,
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
- "Maximum queue depth to report for target devices.");
+ "Maximum queue depth to set for each LUN. "
+ "Default is 32.");
-/* Do not change the value of this after module load */
-int ql2xenabledif = 0;
-module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+int ql2xenabledif = 2;
+module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF "
" Default is 0 - No DIF Support. 1 - Enable it"
@@ -1078,7 +1078,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
cmd->device->lun, type) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
- "wait for peding cmds failed for cmd=%p.\n", cmd);
+ "wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
@@ -1177,7 +1177,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
eh_bus_reset_done:
ql_log(ql_log_warn, vha, 0x802b,
"BUS RESET %s nexus=%ld:%d:%d.\n",
- (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun);
+ (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
}
@@ -1357,6 +1357,9 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct req_que *req = vha->req;
+ if (IS_T10_PI_CAPABLE(vha->hw))
+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
if (sdev->tagged_supported)
scsi_activate_tcq(sdev, req->max_q_depth);
else
@@ -1919,7 +1922,7 @@ static struct isp_operations qla82xx_isp_ops = {
.nvram_config = qla81xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla82xx_load_risc,
- .pci_info_str = qla82xx_pci_info_str,
+ .pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla82xx_intr_handler,
.enable_intrs = qla82xx_enable_intrs,
@@ -2149,7 +2152,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_qla_host_t *base_vha = NULL;
struct qla_hw_data *ha;
char pci_info[30];
- char fw_str[30];
+ char fw_str[30], wq_name[30];
struct scsi_host_template *sht;
int bars, mem_only = 0;
uint16_t req_length = 0, rsp_length = 0;
@@ -2203,12 +2206,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mem_only = mem_only;
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->vport_slock);
+ mutex_init(&ha->selflogin_lock);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
/* Set EEH reset type to fundamental if required by hba */
- if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
+ IS_QLA83XX(ha))
pdev->needs_freset = 1;
ha->prev_topology = 0;
@@ -2318,6 +2323,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA83XX(ha)) {
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
@@ -2416,7 +2422,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
host->max_id = ha->max_fibre_devices;
- host->this_id = 255;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
@@ -2499,7 +2504,7 @@ que_init:
if (IS_QLA82XX(ha)) {
qla82xx_idc_lock(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
+ QLA8XXX_DEV_FAILED);
qla82xx_idc_unlock(ha);
ql_log(ql_log_fatal, base_vha, 0x00d7,
"HW State: FAILED.\n");
@@ -2542,6 +2547,20 @@ que_init:
*/
qla2xxx_wake_dpc(base_vha);
+ if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
+ sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
+ ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
+
+ sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
+ ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
+ INIT_WORK(&ha->idc_state_handler,
+ qla83xx_idc_state_handler_work);
+ INIT_WORK(&ha->nic_core_unrecoverable,
+ qla83xx_nic_core_unrecoverable_work);
+ }
+
skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
base_vha->host->irq = ha->pdev->irq;
@@ -2557,7 +2576,7 @@ skip_dpc:
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
- int prot = 0;
+ int prot = 0, guard;
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -2570,7 +2589,14 @@ skip_dpc:
| SHOST_DIX_TYPE1_PROTECTION
| SHOST_DIX_TYPE2_PROTECTION
| SHOST_DIX_TYPE3_PROTECTION);
- scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+
+ guard = SHOST_DIX_GUARD_CRC;
+
+ if (IS_PI_IPGUARD_CAPABLE(ha) &&
+ (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+ guard |= SHOST_DIX_GUARD_IP;
+
+ scsi_host_set_guard(host, guard);
} else
base_vha->flags.difdix_supported = 0;
}
@@ -2750,6 +2776,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
mutex_unlock(&ha->vport_lock);
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+ "Clearing fcoe driver presence.\n");
+ if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+ "Error while clearing DRV-Presence.\n");
+ }
+
set_bit(UNLOADING, &base_vha->dpc_flags);
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
@@ -2771,6 +2805,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->wq = NULL;
}
+ /* Cancel all work and destroy DPC workqueues */
+ if (ha->dpc_lp_wq) {
+ cancel_work_sync(&ha->idc_aen);
+ destroy_workqueue(ha->dpc_lp_wq);
+ ha->dpc_lp_wq = NULL;
+ }
+
+ if (ha->dpc_hp_wq) {
+ cancel_work_sync(&ha->nic_core_reset);
+ cancel_work_sync(&ha->idc_state_handler);
+ cancel_work_sync(&ha->nic_core_unrecoverable);
+ destroy_workqueue(ha->dpc_hp_wq);
+ ha->dpc_hp_wq = NULL;
+ }
+
/* Kill the kernel thread for this host */
if (ha->dpc_thread) {
struct task_struct *t = ha->dpc_thread;
@@ -2837,7 +2886,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
qla2x00_stop_dpc_thread(vha);
qla25xx_delete_queues(vha);
-
if (ha->flags.fce_enabled)
qla2x00_disable_fce_trace(vha, NULL, NULL);
@@ -2872,6 +2920,7 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
list_del(&fcport->list);
+ qla2x00_clear_loop_id(fcport);
kfree(fcport);
fcport = NULL;
}
@@ -3169,6 +3218,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
}
INIT_LIST_HEAD(&ha->vp_list);
+
+ /* Allocate memory for our loop_id bitmap */
+ ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
+ GFP_KERNEL);
+ if (!ha->loop_id_map)
+ goto fail_async_pd;
+	qla2x00_set_reserved_loop_ids(ha);
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+	    "loop_id_map=%p.\n", ha->loop_id_map);
+
return 1;
fail_async_pd:
@@ -3280,6 +3341,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
{
qla2x00_free_fw_dump(ha);
+ if (ha->mctp_dump)
+ dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
+ ha->mctp_dump_dma);
+
if (ha->srb_mempool)
mempool_destroy(ha->srb_mempool);
@@ -3352,6 +3417,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
kfree(ha->nvram);
kfree(ha->npiv_info);
kfree(ha->swl);
+ kfree(ha->loop_id_map);
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
@@ -3687,13 +3753,651 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
}
if (fcport->login_retry == 0 && status != QLA_SUCCESS)
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla2x00_clear_loop_id(fcport);
}
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
}
+/* Schedule work on any of the dpc-workqueues */
+void
+qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
+{
+ struct qla_hw_data *ha = base_vha->hw;
+
+ switch (work_code) {
+ case MBA_IDC_AEN: /* 0x8200 */
+ if (ha->dpc_lp_wq)
+ queue_work(ha->dpc_lp_wq, &ha->idc_aen);
+ break;
+
+ case QLA83XX_NIC_CORE_RESET: /* 0x1 */
+ if (!ha->flags.nic_core_reset_hdlr_active) {
+ if (ha->dpc_hp_wq)
+ queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
+ } else
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
+ "NIC Core reset is already active. Skip "
+ "scheduling it again.\n");
+ break;
+ case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
+ if (ha->dpc_hp_wq)
+ queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
+ break;
+ case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
+ if (ha->dpc_hp_wq)
+ queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
+ break;
+ default:
+ ql_log(ql_log_warn, base_vha, 0xb05f,
+		    "Unknown work-code=0x%x.\n", work_code);
+ }
+
+ return;
+}
+
+/* Work: Perform NIC Core Unrecoverable state handling */
+void
+qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, nic_core_unrecoverable);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state = 0;
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ qla83xx_reset_ownership(base_vha);
+ if (ha->flags.nic_core_reset_owner) {
+ ha->flags.nic_core_reset_owner = 0;
+ qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
+ qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+ }
+ qla83xx_idc_unlock(base_vha, 0);
+}
+
+/* Work: Execute IDC state handler */
+void
+qla83xx_idc_state_handler_work(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, idc_state_handler);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state = 0;
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ if (dev_state == QLA8XXX_DEV_FAILED ||
+ dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
+ qla83xx_idc_state_handler(base_vha);
+ qla83xx_idc_unlock(base_vha, 0);
+}
+
+int
+qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
+{
+ int rval = QLA_SUCCESS;
+ unsigned long heart_beat_wait = jiffies + (1 * HZ);
+ uint32_t heart_beat_counter1, heart_beat_counter2;
+
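+	/* Sample the firmware heartbeat counter twice, 100ms apart; if it
+	 * has not advanced within one second, declare the f/w dead.
+	 */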
+ do {
+ if (time_after(jiffies, heart_beat_wait)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
+			    "NIC Core f/w is not alive.\n");
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+ &heart_beat_counter1);
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(100);
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+ &heart_beat_counter2);
+ qla83xx_idc_unlock(base_vha, 0);
+ } while (heart_beat_counter1 == heart_beat_counter2);
+
+ return rval;
+}
+
+/* Work: Perform NIC Core Reset handling */
+void
+qla83xx_nic_core_reset_work(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, nic_core_reset);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state = 0;
+
+ if (IS_QLA2031(ha)) {
+ if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
+ ql_log(ql_log_warn, base_vha, 0xb081,
+ "Failed to dump mctp\n");
+ return;
+ }
+
+ if (!ha->flags.nic_core_reset_hdlr_active) {
+ if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+ &dev_state);
+ qla83xx_idc_unlock(base_vha, 0);
+ if (dev_state != QLA8XXX_DEV_NEED_RESET) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
+				    "NIC Core f/w is alive.\n");
+ return;
+ }
+ }
+
+ ha->flags.nic_core_reset_hdlr_active = 1;
+ if (qla83xx_nic_core_reset(base_vha)) {
+ /* NIC Core reset failed. */
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
+ "NIC Core reset failed.\n");
+ }
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ }
+}
+
+/* Work: Handle 8200 IDC aens */
+void
+qla83xx_service_idc_aen(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, idc_aen);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state, idc_control;
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
+ qla83xx_idc_unlock(base_vha, 0);
+ if (dev_state == QLA8XXX_DEV_NEED_RESET) {
+ if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
+ "Application requested NIC Core Reset.\n");
+ qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+ } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
+ QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
+ "Other protocol driver requested NIC Core Reset.\n");
+ qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+ }
+ } else if (dev_state == QLA8XXX_DEV_FAILED ||
+ dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+ qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+ }
+}
+
+static void
+qla83xx_wait_logic(void)
+{
+ int i;
+
+ /* Yield CPU */
+ if (!in_interrupt()) {
+		/*
+		 * Sleep about 100ms before retrying; this bounds the
+		 * retry rate for a single lock operation.
+		 */
+ msleep(100);
+ schedule();
+ } else {
+ for (i = 0; i < 20; i++)
+			cpu_relax(); /* This is a NOP instruction on i386 */
+ }
+}
+
+int
+qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
+{
+ int rval;
+ uint32_t data;
+ uint32_t idc_lck_rcvry_stage_mask = 0x3;
+ uint32_t idc_lck_rcvry_owner_mask = 0x3c;
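+	/* Lock-recovery register layout: bits 0-1 = recovery stage,
+	 * bits 2-5 = owner function number.
+	 */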
+ struct qla_hw_data *ha = base_vha->hw;
+
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
+ if (rval)
+ return rval;
+
+ if ((data & idc_lck_rcvry_stage_mask) > 0) {
+ return QLA_SUCCESS;
+ } else {
+ data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
+ rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+ data);
+ if (rval)
+ return rval;
+
+ msleep(200);
+
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+ &data);
+ if (rval)
+ return rval;
+
+ if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
+ data &= (IDC_LOCK_RECOVERY_STAGE2 |
+ ~(idc_lck_rcvry_stage_mask));
+ rval = qla83xx_wr_reg(base_vha,
+ QLA83XX_IDC_LOCK_RECOVERY, data);
+ if (rval)
+ return rval;
+
+ /* Forcefully perform IDC UnLock */
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
+ &data);
+ if (rval)
+ return rval;
+ /* Clear lock-id by setting 0xff */
+ rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ 0xff);
+ if (rval)
+ return rval;
+ /* Clear lock-recovery by setting 0x0 */
+ rval = qla83xx_wr_reg(base_vha,
+ QLA83XX_IDC_LOCK_RECOVERY, 0x0);
+ if (rval)
+ return rval;
+ } else
+ return QLA_SUCCESS;
+ }
+
+ return rval;
+}
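+/*
+ * Layout sketch of the IDC_LOCK_RECOVERY register as used above (the
+ * STAGE1/STAGE2 values shown are assumptions for illustration, not
+ * confirmed by this patch):
+ *
+ *	bits [1:0]	recovery stage (idc_lck_rcvry_stage_mask == 0x3)
+ *	bits [5:2]	port number of the recovering function (mask 0x3c)
+ *
+ * e.g. if IDC_LOCK_RECOVERY_STAGE1 == 0x1 and ha->portnum == 3, the
+ * stage-1 write stores 0x1 | (3 << 2) == 0xd.
+ */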
+
+int
+qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t o_drv_lockid, n_drv_lockid;
+ unsigned long lock_recovery_timeout;
+
+ lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
+retry_lockid:
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
+ if (rval)
+ goto exit;
+
+ /* MAX wait time before forcing IDC Lock recovery = 2 secs */
+ if (time_after_eq(jiffies, lock_recovery_timeout)) {
+ if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
+ return QLA_SUCCESS;
+ else
+ return QLA_FUNCTION_FAILED;
+ }
+
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
+ if (rval)
+ goto exit;
+
+ if (o_drv_lockid == n_drv_lockid) {
+ qla83xx_wait_logic();
+ goto retry_lockid;
+ } else
+ return QLA_SUCCESS;
+
+exit:
+ return rval;
+}
+
+void
+qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+ uint16_t options = (requester_id << 15) | BIT_6;
+ uint32_t data;
+ struct qla_hw_data *ha = base_vha->hw;
+
+ /* IDC-lock implementation using driver-lock/lock-id remote registers */
+retry_lock:
+ if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
+ == QLA_SUCCESS) {
+ if (data) {
+ /* Setting lock-id to our function-number */
+ qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ ha->portnum);
+ } else {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
+ "Failed to acquire IDC lock. retrying...\n");
+
+ /* Retry/Perform IDC-Lock recovery */
+ if (qla83xx_idc_lock_recovery(base_vha)
+ == QLA_SUCCESS) {
+ qla83xx_wait_logic();
+ goto retry_lock;
+ } else
+ ql_log(ql_log_warn, base_vha, 0xb075,
+ "IDC Lock recovery FAILED.\n");
+ }
+
+ }
+
+ return;
+
+ /* XXX: IDC-lock implementation using access-control mbx */
+retry_lock2:
+ if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
+ "Failed to acquire IDC lock. retrying...\n");
+ /* Retry/Perform IDC-Lock recovery */
+ if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
+ qla83xx_wait_logic();
+ goto retry_lock2;
+ } else
+ ql_log(ql_log_warn, base_vha, 0xb076,
+ "IDC Lock recovery FAILED.\n");
+ }
+
+ return;
+}
+
+void
+qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+ uint16_t options = (requester_id << 15) | BIT_7, retry;
+ uint32_t data;
+ struct qla_hw_data *ha = base_vha->hw;
+
+ /* IDC-unlock implementation using driver-unlock/lock-id
+ * remote registers
+ */
+ retry = 0;
+retry_unlock:
+ if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
+ == QLA_SUCCESS) {
+ if (data == ha->portnum) {
+ qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
+ /* Clearing lock-id by setting 0xff */
+ qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
+ } else if (retry < 10) {
+ /* SV: XXX: IDC unlock retrying needed here? */
+
+ /* Retry for IDC-unlock */
+ qla83xx_wait_logic();
+ retry++;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
+ "Failed to release IDC lock, retyring=%d\n", retry);
+ goto retry_unlock;
+ }
+ } else if (retry < 10) {
+ /* Retry for IDC-unlock */
+ qla83xx_wait_logic();
+ retry++;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
+ "Failed to read drv-lockid, retyring=%d\n", retry);
+ goto retry_unlock;
+ }
+
+ return;
+
+ /* XXX: IDC-unlock implementation using access-control mbx */
+ retry = 0;
+retry_unlock2:
+ if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+ if (retry < 10) {
+ /* Retry for IDC-unlock */
+ qla83xx_wait_logic();
+ retry++;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
+ "Failed to release IDC lock, retyring=%d\n", retry);
+ goto retry_unlock2;
+ }
+ }
+
+ return;
+}
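+/*
+ * Hedged usage sketch for the lock/unlock pair above, matching the
+ * callers in this patch (requester_id is 0 everywhere; the
+ * access-control mailbox variants after the early returns are
+ * unreachable reference code, see the XXX markers):
+ *
+ *	qla83xx_idc_lock(base_vha, 0);
+ *	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ *	qla83xx_idc_unlock(base_vha, 0);
+ */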
+
+int
+__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_presence;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if (rval == QLA_SUCCESS) {
+ drv_presence |= (1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+ drv_presence);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_set_drv_presence(vha);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+int
+__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_presence;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if (rval == QLA_SUCCESS) {
+ drv_presence &= ~(1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+ drv_presence);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_clear_drv_presence(vha);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
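+/*
+ * The DRV_PRESENCE register used by the four helpers above is a
+ * per-function bitmask: function N owns bit N. Worked example for
+ * portnum == 2:
+ *
+ *	drv_presence |= (1 << 2);	(announce: sets bit 0x4)
+ *	drv_presence &= ~(1 << 2);	(withdraw: clears bit 0x4)
+ */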
+
+void
+qla83xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_ack, drv_presence;
+ unsigned long ack_timeout;
+
+ /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
+ ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+ while (1) {
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if (drv_ack == drv_presence)
+ break;
+
+ if (time_after_eq(jiffies, ack_timeout)) {
+ ql_log(ql_log_warn, vha, 0xb067,
+ "RESET ACK TIMEOUT! drv_presence=0x%x "
+ "drv_ack=0x%x\n", drv_presence, drv_ack);
+ /*
+ * The function(s) which did not ack in time are forced
+ * to withdraw any further participation in the IDC
+ * reset.
+ */
+ if (drv_ack != drv_presence)
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+ drv_ack);
+ break;
+ }
+
+ qla83xx_idc_unlock(vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(vha, 0);
+ }
+
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
+ ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
+}
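+/*
+ * ACK-rendezvous example for the wait above: with functions 0, 1 and 3
+ * present, drv_presence reads 0xb, and the loop exits once drv_ack also
+ * reads 0xb. On timeout, DRV_PRESENCE is overwritten with drv_ack, so
+ * the non-acking functions are dropped from further IDC participation.
+ */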
+
+int
+qla83xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t idc_control;
+
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
+ ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
+
+ /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
+ __qla83xx_set_idc_control(vha, idc_control);
+
+ qla83xx_idc_unlock(vha, 0);
+ rval = qla83xx_restart_nic_firmware(vha);
+ qla83xx_idc_lock(vha, 0);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb06a,
+ "Failed to restart NIC f/w.\n");
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
+ ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
+ } else {
+ ql_dbg(ql_dbg_p3p, vha, 0xb06c,
+ "Success in restarting nic f/w.\n");
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
+ ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
+ }
+
+ return rval;
+}
+
+/* Assumes idc_lock always held on entry */
+int
+qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
+{
+ struct qla_hw_data *ha = base_vha->hw;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ uint32_t dev_state;
+
+ /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+ while (1) {
+
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql_log(ql_log_warn, base_vha, 0xb06e,
+ "Initialization TIMEOUT!\n");
+ /* Init timeout. Disable further NIC Core
+ * communication.
+ */
+ qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ql_log(ql_log_info, base_vha, 0xb06f,
+ "HW State: FAILED.\n");
+ }
+
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ if (ha->flags.nic_core_reset_owner)
+ qla83xx_idc_audit(base_vha,
+ IDC_AUDIT_COMPLETION);
+ ha->flags.nic_core_reset_owner = 0;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
+ "Reset_owner reset by 0x%x.\n",
+ ha->portnum);
+ goto exit;
+ case QLA8XXX_DEV_COLD:
+ if (ha->flags.nic_core_reset_owner)
+ rval = qla83xx_device_bootstrap(base_vha);
+ else {
+ /* Wait for AEN to change device-state */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ }
+ break;
+ case QLA8XXX_DEV_INITIALIZING:
+ /* Wait for AEN to change device-state */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
+ qla83xx_need_reset_handler(base_vha);
+ else {
+ /* Wait for AEN to change device-state */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ }
+ /* reset timeout value after need reset handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_dev_init_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ /* XXX: DEBUG for now */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ /* XXX: DEBUG for now */
+ if (ha->flags.quiesce_owner)
+ goto exit;
+
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ dev_init_timeout = jiffies +
+ (ha->fcoe_dev_init_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ if (ha->flags.nic_core_reset_owner)
+ qla83xx_idc_audit(base_vha,
+ IDC_AUDIT_COMPLETION);
+ ha->flags.nic_core_reset_owner = 0;
+ __qla83xx_clear_drv_presence(base_vha);
+ qla83xx_idc_unlock(base_vha, 0);
+ qla8xxx_dev_failed_handler(base_vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla83xx_idc_lock(base_vha, 0);
+ goto exit;
+ case QLA8XXX_BAD_VALUE:
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ break;
+ default:
+ ql_log(ql_log_warn, base_vha, 0xb071,
+ "Unknow Device State: %x.\n", dev_state);
+ qla83xx_idc_unlock(base_vha, 0);
+ qla8xxx_dev_failed_handler(base_vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla83xx_idc_lock(base_vha, 0);
+ goto exit;
+ }
+ }
+
+exit:
+ return rval;
+}
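+/*
+ * State-machine summary for the handler above (the device state is
+ * shared with the other protocol drivers via QLA83XX_IDC_DEV_STATE):
+ * COLD -> INITIALIZING -> READY on a successful bootstrap by the reset
+ * owner; NEED_RESET, NEED_QUIESCENT and QUIESCENT park non-owners in
+ * unlock/sleep/relock loops; FAILED and unknown states fail the handler.
+ */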
+
/**************************************************************************
* qla2x00_do_dpc
* This kernel thread is a task that is scheduled by the interrupt handler
@@ -3749,7 +4453,7 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags)) {
qla82xx_idc_lock(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
+ QLA8XXX_DEV_FAILED);
qla82xx_idc_unlock(ha);
ql_log(ql_log_info, base_vha, 0x4004,
"HW State: FAILED.\n");
@@ -3819,14 +4523,21 @@ qla2x00_do_dpc(void *data)
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
- qla82xx_device_state_handler(base_vha);
- clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
- if (!ha->flags.quiesce_owner) {
- qla2x00_perform_loop_resync(base_vha);
-
- qla82xx_idc_lock(ha);
- qla82xx_clear_qsnt_ready(base_vha);
- qla82xx_idc_unlock(ha);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_device_state_handler(base_vha);
+ clear_bit(ISP_QUIESCE_NEEDED,
+ &base_vha->dpc_flags);
+ if (!ha->flags.quiesce_owner) {
+ qla2x00_perform_loop_resync(base_vha);
+
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_qsnt_ready(base_vha);
+ qla82xx_idc_unlock(ha);
+ }
+ } else {
+ clear_bit(ISP_QUIESCE_NEEDED,
+ &base_vha->dpc_flags);
+ qla2x00_quiesce_io(base_vha);
}
ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
"Quiescence mode end.\n");
@@ -4326,7 +5037,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
qla82xx_idc_lock(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_INITIALIZING);
+ QLA8XXX_DEV_INITIALIZING);
qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
QLA82XX_IDC_VERSION);
@@ -4350,12 +5061,12 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
"HW State: FAILED.\n");
qla82xx_clear_drv_active(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
+ QLA8XXX_DEV_FAILED);
} else {
ql_log(ql_log_info, base_vha, 0x900c,
"HW State: READY.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_READY);
+ QLA8XXX_DEV_READY);
qla82xx_idc_unlock(ha);
ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
@@ -4370,7 +5081,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
"This devfn is not reset owner = 0x%x.\n",
ha->pdev->devfn);
if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
- QLA82XX_DEV_READY)) {
+ QLA8XXX_DEV_READY)) {
ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
@@ -4495,6 +5206,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index d70f03008981..892a81e457bc 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index a683e766d1ae..32fdc2a66dd1 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -966,16 +966,16 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
QLA82XX_IDC_PARAM_ADDR , 8);
if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
- ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
- ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+ ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+ ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
} else {
- ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
- ha->nx_reset_timeout = le32_to_cpu(*wptr);
+ ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
}
ql_dbg(ql_dbg_init, vha, 0x004e,
- "nx_dev_init_timeout=%d "
- "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
- ha->nx_reset_timeout);
+ "fcoe_dev_init_timeout=%d "
+ "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout,
+ ha->fcoe_reset_timeout);
return;
}
@@ -1017,7 +1017,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
return;
- if (ha->flags.isp82xx_reset_hdlr_active)
+ if (ha->flags.nic_core_reset_hdlr_active)
return;
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1662,6 +1662,23 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+static uint32_t
+qla83xx_select_led_port(struct qla_hw_data *ha)
+{
+ uint32_t led_select_value = 0;
+
+ if (!IS_QLA83XX(ha))
+ goto out;
+
+ if (ha->flags.port0)
+ led_select_value = QLA83XX_LED_PORT0;
+ else
+ led_select_value = QLA83XX_LED_PORT1;
+
+out:
+ return led_select_value;
+}
+
void
qla83xx_beacon_blink(struct scsi_qla_host *vha)
{
@@ -1669,22 +1686,34 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
uint16_t led_cfg[6];
uint16_t orig_led_cfg[6];
+ uint32_t led_10_value, led_43_value;
if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
return;
- if (IS_QLA2031(ha) && ha->beacon_blink_led) {
- if (ha->flags.port0)
- led_select_value = 0x00201320;
- else
- led_select_value = 0x00201328;
+ if (!ha->beacon_blink_led)
+ return;
+
+ if (IS_QLA2031(ha)) {
+ led_select_value = qla83xx_select_led_port(ha);
- qla83xx_write_remote_reg(vha, led_select_value, 0x40002000);
- qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000);
+ qla83xx_wr_reg(vha, led_select_value, 0x40002000);
+ qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000);
+ msleep(1000);
+ qla83xx_wr_reg(vha, led_select_value, 0x40004000);
+ qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
+ } else if (IS_QLA8031(ha)) {
+ led_select_value = qla83xx_select_led_port(ha);
+
+ qla83xx_rd_reg(vha, led_select_value, &led_10_value);
+ qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value);
+ qla83xx_wr_reg(vha, led_select_value, 0x01f44000);
+ msleep(500);
+ qla83xx_wr_reg(vha, led_select_value, 0x400001f4);
msleep(1000);
- qla83xx_write_remote_reg(vha, led_select_value, 0x40004000);
- qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000);
- } else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) {
+ qla83xx_wr_reg(vha, led_select_value, led_10_value);
+ qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value);
+ } else if (IS_QLA81XX(ha)) {
int rval;
/* Save Current */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f5fdb16bec9b..cfe934e1af42 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,15 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.03-k"
+#define QLA2XXX_VERSION "8.04.00.07-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 3
+#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index f1ad02ea212b..e4dc7c733c29 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -4,5 +4,5 @@ config SCSI_QLA_ISCSI
select SCSI_ISCSI_ATTRS
select ISCSI_BOOT_SYSFS
---help---
- This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
- iSCSI host adapter family.
+ This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
+ and 8032 (ISP83XX) iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 5b44139ff43d..4230977748cf 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
- ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o
+ ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
new file mode 100644
index 000000000000..6e9af20be12f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -0,0 +1,1611 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <linux/ratelimit.h>
+
+#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
+{
+ return readl((void __iomem *)(ha->nx_pcibase + addr));
+}
+
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
+{
+ writel(val, (void __iomem *)(ha->nx_pcibase + addr));
+}
+
+static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
+{
+ uint32_t val;
+ int ret_val = QLA_SUCCESS;
+
+ qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
+ val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
+ if (val != addr) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
+ __func__, addr, val);
+ ret_val = QLA_ERROR;
+ }
+
+ return ret_val;
+}
+
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t *data)
+{
+ int ret_val;
+
+ ret_val = qla4_83xx_set_win_base(ha, addr);
+
+ if (ret_val == QLA_SUCCESS)
+ *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
+ else
+ ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
+ __func__, addr);
+
+ return ret_val;
+}
+
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t data)
+{
+ int ret_val;
+
+ ret_val = qla4_83xx_set_win_base(ha, addr);
+
+ if (ret_val == QLA_SUCCESS)
+ qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
+ else
+ ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
+ __func__, addr, data);
+
+ return ret_val;
+}
+
+static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
+{
+ int lock_owner;
+ int timeout = 0;
+ uint32_t lock_status = 0;
+ int ret_val = QLA_SUCCESS;
+
+ while (lock_status == 0) {
+ lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
+ if (lock_status)
+ break;
+
+ if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
+ lock_owner = qla4_83xx_rd_reg(ha,
+ QLA83XX_FLASH_LOCK_ID);
+ ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
+ __func__, ha->func_num, lock_owner);
+ ret_val = QLA_ERROR;
+ break;
+ }
+ msleep(20);
+ }
+
+ qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
+ return ret_val;
+}
+
+static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
+{
+ /* Reading FLASH_UNLOCK register unlocks the Flash */
+ qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
+ qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
+}
+
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+ uint8_t *p_data, int u32_word_count)
+{
+ int i;
+ uint32_t u32_word;
+ uint32_t addr = flash_addr;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla4_83xx_flash_lock(ha);
+ if (ret_val == QLA_ERROR)
+ goto exit_lock_error;
+
+ if (addr & 0x03) {
+ ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+ __func__, addr);
+ ret_val = QLA_ERROR;
+ goto exit_flash_read;
+ }
+
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_WINDOW,
+ (addr & 0xFFFF0000));
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!",
+ __func__, addr);
+ goto exit_flash_read;
+ }
+
+ ret_val = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_DATA(addr),
+ &u32_word);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_flash_read;
+ }
+
+ *(__le32 *)p_data = le32_to_cpu(u32_word);
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+exit_flash_read:
+ qla4_83xx_flash_unlock(ha);
+
+exit_lock_error:
+ return ret_val;
+}
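+/*
+ * Addressing sketch for the windowed read above: the upper 16 bits of
+ * the flash address select FLASH_DIRECT_WINDOW, and the low bits index
+ * into the window via QLA83XX_FLASH_DIRECT_DATA(). For a hypothetical
+ * addr of 0x1c0004:
+ *
+ *	window = 0x1c0004 & 0xFFFF0000;		(== 0x1c0000)
+ *	qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, window);
+ *	qla4_83xx_rd_reg_indirect(ha,
+ *				  QLA83XX_FLASH_DIRECT_DATA(0x1c0004), &word);
+ */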
+
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+ uint32_t flash_addr, uint8_t *p_data,
+ int u32_word_count)
+{
+ uint32_t i;
+ uint32_t u32_word;
+ uint32_t flash_offset;
+ uint32_t addr = flash_addr;
+ int ret_val = QLA_SUCCESS;
+
+ flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);
+
+ if (addr & 0x3) {
+ ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+ __func__, addr);
+ ret_val = QLA_ERROR;
+ goto exit_lockless_read;
+ }
+
+ ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
+ addr);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ /* Check if data is spread across multiple sectors */
+ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+ (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+
+ /* Multi sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_DATA(addr),
+ &u32_word);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ *(__le32 *)p_data = le32_to_cpu(u32_word);
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+ /* This write is needed once for each sector */
+ ret_val = qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_WINDOW,
+ addr);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_DATA(addr),
+ &u32_word);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ *(__le32 *)p_data = le32_to_cpu(u32_word);
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+exit_lockless_read:
+ return ret_val;
+}
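+/*
+ * Sector-crossing rule used above, illustrated assuming a hypothetical
+ * QLA83XX_FLASH_SECTOR_SIZE of 0x10000: a 16-word read starting at
+ * offset 0xFFF8 within a sector gives flash_offset + 16 * 4 == 0x10038,
+ * which exceeds SECTOR_SIZE - 1, so the multi-sector path is taken and
+ * FLASH_DIRECT_WINDOW is rewritten once the boundary is crossed.
+ */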
+
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+ if (qla4_83xx_flash_lock(ha))
+ ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
+
+ /*
+ * Either we got the lock, or someone else is holding it;
+ * since we are resetting anyway, forcefully unlock.
+ */
+ qla4_83xx_flash_unlock(ha);
+}
+
+/**
+ * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
+ * @ha: Pointer to adapter structure
+ * @addr: MS/off-chip memory address to write to (must be 128-bit aligned)
+ * @data: Data to be written
+ * @count: Number of 128-bit (16-byte) words to write
+ *
+ * Return: On success return QLA_SUCCESS
+ * On error return QLA_ERROR
+ **/
+static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+ uint32_t *data, uint32_t count)
+{
+ int i, j;
+ uint32_t agt_ctrl;
+ unsigned long flags;
+ int ret_val = QLA_SUCCESS;
+
+ /* Only 128-bit aligned access */
+ if (addr & 0xF) {
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write;
+ }
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /* Write address */
+ ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+ QLA8XXX_ADDR_QDR_NET_MAX)) ||
+ (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX)))) {
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write_unlock;
+ }
+
+ ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
+ addr);
+ /* Write data */
+ ret_val |= qla4_83xx_wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_LO,
+ *data++);
+ ret_val |= qla4_83xx_wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_HI,
+ *data++);
+ ret_val |= qla4_83xx_wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_ULO,
+ *data++);
+ ret_val |= qla4_83xx_wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_UHI,
+ *data++);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ /* Check write status */
+ ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_ENABLE);
+ ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_START);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ ret_val = qla4_83xx_rd_reg_indirect(ha,
+ MD_MIU_TEST_AGT_CTRL,
+ &agt_ctrl);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+ if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failed */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write_unlock;
+ }
+ }
+
+exit_ms_mem_write_unlock:
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+ return ret_val;
+}
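+/*
+ * Each 16-byte burst above follows the same test-agent protocol:
+ * write AGT_ADDR_LO, then the four 32-bit WRDATA words, then
+ * WRITE_ENABLE followed by WRITE_START, and finally poll AGT_CTRL until
+ * MIU_TA_CTL_BUSY clears (bounded by MAX_CTL_CHECK iterations).
+ */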
+
+#define INTENT_TO_RECOVER 0x01
+#define PROCEED_TO_RECOVER 0x02
+
+static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
+{
+
+ uint32_t lock = 0, lockid;
+ int ret_val = QLA_ERROR;
+
+ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+
+ /* Check for other Recovery in progress, go wait */
+ if ((lockid & 0x3) != 0)
+ goto exit_lock_recovery;
+
+ /* Intent to Recover */
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+ (ha->func_num << 2) | INTENT_TO_RECOVER);
+
+ msleep(200);
+
+ /* Check Intent to Recover is advertised */
+ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+ if ((lockid & 0x3C) != (ha->func_num << 2))
+ goto exit_lock_recovery;
+
+ ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
+ __func__, ha->func_num);
+
+ /* Proceed to Recover */
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+ (ha->func_num << 2) | PROCEED_TO_RECOVER);
+
+ /* Force Unlock */
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
+ ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);
+
+ /* Clear bits 0-5 in IDC_RECOVERY register */
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);
+
+ /* Get lock */
+ lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
+ if (lock) {
+ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
+ lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
+ ret_val = QLA_SUCCESS;
+ }
+
+exit_lock_recovery:
+ return ret_val;
+}
+
+#define QLA83XX_DRV_LOCK_MSLEEP 200
+
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
+{
+ int timeout = 0;
+ uint32_t status = 0;
+ int ret_val = QLA_SUCCESS;
+ uint32_t first_owner = 0;
+ uint32_t tmo_owner = 0;
+ uint32_t lock_id;
+ uint32_t func_num;
+ uint32_t lock_cnt;
+
+ while (status == 0) {
+ status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
+ if (status) {
+ /* Increment Counter (8-31) and update func_num (0-7) on
+ * getting a successful lock */
+ lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+ lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
+ qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
+ break;
+ }
+
+ if (timeout == 0)
+ /* Save counter + ID of function holding the lock for
+ * first failure */
+ first_owner = ha->isp_ops->rd_reg_direct(ha,
+ QLA83XX_DRV_LOCK_ID);
+
+ if (++timeout >=
+ (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
+ tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+ func_num = tmo_owner & 0xFF;
+ lock_cnt = tmo_owner >> 8;
+ ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
+ __func__, ha->func_num, func_num, lock_cnt,
+ (first_owner & 0xFF));
+
+ if (first_owner != tmo_owner) {
+ /* Some other driver got the lock, or the same
+ * driver got it again (counter value changed)
+ * while we were waiting for the lock.
+ * Retry for another 2 secs */
+ ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
+ __func__, ha->func_num);
+ timeout = 0;
+ } else {
+ /* Same driver holding lock > 2sec.
+ * Force Recovery */
+ ret_val = qla4_83xx_lock_recovery(ha);
+ if (ret_val == QLA_SUCCESS) {
+ /* Recovered and got lock */
+ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
+ __func__, ha->func_num);
+ break;
+ }
+ /* Recovery failed: some other function has
+ * the lock, wait for 2 secs and retry */
+ ql4_printk(KERN_INFO, ha, "%s: IDC lock recovery by %d failed, retrying on timeout\n",
+ __func__, ha->func_num);
+ timeout = 0;
+ }
+ }
+ msleep(QLA83XX_DRV_LOCK_MSLEEP);
+ }
+
+ return ret_val;
+}
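+/*
+ * Worked example of the DRV_LOCK_ID update above: the low byte holds
+ * the owning function and bits 8..31 a grab counter. If func 5 takes
+ * the lock while the register reads 0x0302 (count 3, last owner 2):
+ *
+ *	lock_id = ((0x0302 + (1 << 8)) & ~0xFF) | 5;	(== 0x0405)
+ */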
+
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
+{
+ int id;
+
+ id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+
+ if ((id & 0xFF) != ha->func_num) {
+ ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
+ __func__, ha->func_num, (id & 0xFF));
+ return;
+ }
+
+ /* Keep the lock counter value; set the func_num field of lock-id to 0xFF */
+ qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
+ qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
+}
+
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ idc_ctrl |= DONTRESET_BIT0;
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+ idc_ctrl));
+}
+
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ idc_ctrl &= ~DONTRESET_BIT0;
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+ idc_ctrl));
+}
+
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ return idc_ctrl & DONTRESET_BIT0;
+}
+
+/*-------------------------IDC State Machine ---------------------*/
+
+enum {
+ UNKNOWN_CLASS = 0,
+ NIC_CLASS,
+ FCOE_CLASS,
+ ISCSI_CLASS
+};
+
+struct device_info {
+ int func_num;
+ int device_type;
+ int port_num;
+};
+
+static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+ uint32_t dev_part, dev_part1, dev_part2;
+ int i;
+ struct device_info device_map[16];
+ int func_nibble;
+ int nibble;
+ int nic_present = 0;
+ int iscsi_present = 0;
+ int iscsi_func_low = 0;
+
+ /* Use the dev_partition register to determine the PCI function number
+ * and then check drv_active register to see which driver is loaded */
+ dev_part1 = qla4_83xx_rd_reg(ha,
+ ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
+ dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
+ drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
+
+ /* Each function has 4 bits in dev_partition Info register,
+ * Lower 2 bits - device type, Upper 2 bits - physical port number */
+ dev_part = dev_part1;
+ for (i = nibble = 0; i <= 15; i++, nibble++) {
+ func_nibble = dev_part & (0xF << (nibble * 4));
+ func_nibble >>= (nibble * 4);
+ device_map[i].func_num = i;
+ device_map[i].device_type = func_nibble & 0x3;
+ device_map[i].port_num = func_nibble & 0xC;
+
+ if (device_map[i].device_type == NIC_CLASS) {
+ if (drv_active & (1 << device_map[i].func_num)) {
+ nic_present++;
+ break;
+ }
+ } else if (device_map[i].device_type == ISCSI_CLASS) {
+ if (drv_active & (1 << device_map[i].func_num)) {
+ if (!iscsi_present ||
+ (iscsi_present &&
+ (iscsi_func_low > device_map[i].func_num)))
+ iscsi_func_low = device_map[i].func_num;
+
+ iscsi_present++;
+ }
+ }
+
+ /* For function_num[8..15] get info from dev_part2 register */
+ if (nibble == 7) {
+ nibble = 0;
+ dev_part = dev_part2;
+ }
+ }
+
+ /* Reset ownership is priority-ordered among the drivers present:
+ * NIC takes precedence over iSCSI and FCoE, and iSCSI takes
+ * precedence over FCoE. */
+ if (!nic_present && (ha->func_num == iscsi_func_low)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: can reset - NIC not present and lower iSCSI function is %d\n",
+ __func__, ha->func_num));
+ return 1;
+ }
+
+ return 0;
+}
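+/*
+ * Decode example for the per-function nibble above (device type in bits
+ * [1:0], port in bits [3:2]): a nibble of 0x7 yields device_type ==
+ * (0x7 & 0x3) == 3 (ISCSI_CLASS) with port bits (0x7 & 0xC) == 0x4.
+ */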
+
+/**
+ * qla4_83xx_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
+{
+ uint32_t dev_state, drv_state, drv_active;
+ unsigned long reset_timeout, dev_init_timeout;
+
+ ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
+ __func__);
+
+ if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
+ __func__));
+ qla4_8xxx_set_rst_ready(ha);
+
+ /* Non-reset owners ACK Reset and wait for device INIT state
+ * as part of Reset Recovery by Reset Owner */
+ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+ do {
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
+ __func__);
+ break;
+ }
+
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+
+ dev_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DEV_STATE);
+ } while (dev_state == QLA8XXX_DEV_NEED_RESET);
+ } else {
+ qla4_8xxx_set_rst_ready(ha);
+ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+ ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, drv_state, drv_active);
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+ __func__, DRIVER_NAME, drv_state,
+ drv_active);
+ break;
+ }
+
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+
+ drv_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DRV_STATE);
+ drv_active = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DRV_ACTIVE);
+ }
+
+ if (drv_state != drv_active) {
+ ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
+ __func__, (drv_active ^ drv_state));
+ drv_active = drv_active & drv_state;
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
+ drv_active);
+ }
+
+ clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ /* Start Reset Recovery */
+ qla4_8xxx_device_bootstrap(ha);
+ }
+}
+
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
+{
+ uint32_t idc_params, ret_val;
+
+ ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
+ (uint8_t *)&idc_params, 1);
+ if (ret_val == QLA_SUCCESS) {
+ ha->nx_dev_init_timeout = idc_params & 0xFFFF;
+ ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
+ } else {
+ ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
+ ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
+ }
+
+ DEBUG2(ql4_printk(KERN_DEBUG, ha,
+ "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
+ __func__, ha->nx_dev_init_timeout,
+ ha->nx_reset_timeout));
+}
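+/*
+ * The IDC-parameter flash word read above packs both timeouts, low half
+ * first: e.g. an idc_params value of 0x000a001e would give
+ * nx_dev_init_timeout == 0x1e (30 secs) and nx_reset_timeout == 0xa
+ * (10 secs).
+ */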
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+
+static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
+{
+ uint8_t *phdr;
+
+ if (!ha->reset_tmplt.buff) {
+ ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
+ __func__);
+ return;
+ }
+
+ phdr = ha->reset_tmplt.buff;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
+ *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+ *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+ *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+ *(phdr+13), *(phdr+14), *(phdr+15)));
+}
+
+static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
+{
+ uint8_t *p_cache;
+ uint32_t src, count, size;
+ uint64_t dest;
+ int ret_val = QLA_SUCCESS;
+
+ src = QLA83XX_BOOTLOADER_FLASH_ADDR;
+ dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
+ size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
+
+ /* Round size up to a 128-bit (16-byte) boundary */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ /* Number of 16-byte chunks */
+ count = size / 16;
+
+ p_cache = vmalloc(size);
+ if (p_cache == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ goto exit_copy_bootloader;
+ }
+
+ ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
+ size / sizeof(uint32_t));
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
+ __func__);
+ goto exit_copy_error;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
+ __func__));
+
+ /* 128 bit/16 byte write to MS memory */
+ ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
+ count);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
+ __func__);
+ goto exit_copy_error;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
+ __func__, size));
+
+exit_copy_error:
+ vfree(p_cache);
+
+exit_copy_bootloader:
+ return ret_val;
+}
+
+static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
+{
+ uint32_t val, ret_val = QLA_ERROR;
+ int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
+ if (val == PHAN_INITIALIZE_COMPLETE) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Command Peg initialization complete. State=0x%x\n",
+ __func__, val));
+ ret_val = QLA_SUCCESS;
+ break;
+ }
+ msleep(CRB_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ return ret_val;
+}
+
+/**
+ * qla4_83xx_poll_reg - Poll the given CRB addr for up to duration msecs,
+ * until the value read, ANDed with test_mask, equals test_result.
+ *
+ * @ha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for a total of "duration" msecs
+ * @test_mask : Mask to AND with the value read
+ * @test_result : Expected value of (value & test_mask)
+ **/
+static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
+ int duration, uint32_t test_mask,
+ uint32_t test_result)
+{
+ uint32_t value;
+ uint8_t retries;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+ if (ret_val == QLA_ERROR)
+ goto exit_poll_reg;
+
+ retries = duration / 10;
+ do {
+ if ((value & test_mask) != test_result) {
+ msleep(duration / 10);
+ ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+ if (ret_val == QLA_ERROR)
+ goto exit_poll_reg;
+
+ ret_val = QLA_ERROR;
+ } else {
+ ret_val = QLA_SUCCESS;
+ break;
+ }
+ } while (retries--);
+
+exit_poll_reg:
+ if (ret_val == QLA_ERROR) {
+ ha->reset_tmplt.seq_error++;
+ ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
+ __func__, value, test_mask, test_result);
+ }
+
+ return ret_val;
+}
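+/*
+ * Timing note for the poll above: the register is read once up front
+ * and then re-read after duration/10 msec sleeps, for roughly
+ * duration/10 retries, i.e. on the order of the requested window.
+ */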
+
+static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
+{
+ uint32_t sum = 0;
+ uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
+ int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
+ int ret_val;
+
+ while (u16_count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ /* checksum of 0 indicates a valid template */
+ if (!sum) {
+ ret_val = QLA_SUCCESS;
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ }
+
+ return ret_val;
+}
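+/*
+ * Fold example for the checksum above: a running sum of 0x2345b folds
+ * as (0x345b + 0x2) == 0x345d, which is non-zero, so that template
+ * would be rejected; only a total that folds to 0 validates.
+ */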
+
+/**
+ * qla4_83xx_read_reset_template - Read Reset Template from Flash
+ * @ha: Pointer to adapter structure
+ **/
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
+{
+ uint8_t *p_buff;
+ uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+ uint32_t ret_val;
+
+ ha->reset_tmplt.seq_error = 0;
+ ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
+ if (ha->reset_tmplt.buff == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
+ __func__);
+ goto exit_read_reset_template;
+ }
+
+ p_buff = ha->reset_tmplt.buff;
+ addr = QLA83XX_RESET_TEMPLATE_ADDR;
+
+ tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
+ sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Read template hdr size %d from Flash\n",
+ __func__, tmplt_hdr_def_size));
+
+ /* Copy template header from flash */
+ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+ tmplt_hdr_def_size);
+ if (ret_val != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
+ __func__);
+ goto exit_read_template_error;
+ }
+
+ ha->reset_tmplt.hdr =
+ (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
+
+ /* Validate the template header size and signature */
+ tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+ if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+ (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+ ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
+ __func__, tmplt_hdr_size, tmplt_hdr_def_size);
+ goto exit_read_template_error;
+ }
+
+ addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
+ p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
+ tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
+ ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Read rest of the template size %d\n",
+ __func__, ha->reset_tmplt.hdr->size));
+
+ /* Copy rest of the template */
+ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+ tmplt_hdr_def_size);
+ if (ret_val != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to read reset tempelate\n",
+ __func__);
+ goto exit_read_template_error;
+ }
+
+ /* Integrity check */
+ if (qla4_83xx_reset_seq_checksum_test(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
+ __func__);
+ goto exit_read_template_error;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
+ __func__));
+
+ /* Get STOP, START, INIT sequence offsets */
+ ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
+ ha->reset_tmplt.hdr->init_seq_offset;
+ ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
+ ha->reset_tmplt.hdr->start_seq_offset;
+ ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
+ ha->reset_tmplt.hdr->hdr_size;
+ qla4_83xx_dump_reset_seq_hdr(ha);
+
+ goto exit_read_reset_template;
+
+exit_read_template_error:
+ vfree(ha->reset_tmplt.buff);
+
+exit_read_reset_template:
+ return;
+}
+
+/**
+ * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ **/
+static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
+ uint32_t raddr, uint32_t waddr)
+{
+ uint32_t value;
+
+ qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+ qla4_83xx_wr_reg_indirect(ha, waddr, value);
+}
+
+/**
+ * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
+ *
+ * This function reads the value from raddr, ANDs it with test_mask,
+ * shifts it left/right and ORs/XORs it with values from the RMW header,
+ * then writes the result to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ **/
+static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
+ uint32_t waddr,
+ struct qla4_83xx_rmw *p_rmw_hdr)
+{
+ uint32_t value;
+
+ if (p_rmw_hdr->index_a)
+ value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
+ else
+ qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+
+ value &= p_rmw_hdr->test_mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+
+ qla4_83xx_wr_reg_indirect(ha, waddr, value);
+
+ return;
+}
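+/*
+ * Worked RMW example for the transform above, with a hypothetical
+ * header of test_mask == 0xFF, shl == 8, shr == 0, or_value == 0x1 and
+ * xor_value == 0: a value of 0x1234 read from raddr becomes
+ * (((0x1234 & 0xFF) << 8) | 0x1) == 0x3401 before the write to waddr.
+ */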
+
+static void qla4_83xx_write_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ struct qla4_83xx_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla4_83xx_entry *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ struct qla4_83xx_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla4_83xx_entry *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla4_83xx_entry *p_entry;
+ struct qla4_83xx_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla4_83xx_poll *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+ /* Entries start after the 8-byte qla4_83xx_poll header, which
+ * carries the test_mask and test_value. */
+ p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
+ sizeof(struct qla4_83xx_poll));
+
+ delay = (long)p_hdr->delay;
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+ p_poll->test_mask,
+ p_poll->test_value);
+ }
+ } else {
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
+ &value);
+ qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
+ &value);
+ }
+ }
+ }
+}
+
+static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla4_83xx_quad_entry *p_entry;
+ struct qla4_83xx_poll *p_poll;
+ uint32_t i;
+
+ p_poll = (struct qla4_83xx_poll *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+ p_entry = (struct qla4_83xx_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
+ p_entry->dr_value);
+ qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
+ __func__, i,
+ ha->reset_tmplt.seq_index));
+ }
+ }
+ }
+}
+
+static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ struct qla4_83xx_entry *p_entry;
+ struct qla4_83xx_rmw *p_rmw_hdr;
+ uint32_t i;
+
+ p_rmw_hdr = (struct qla4_83xx_rmw *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+ p_entry = (struct qla4_83xx_entry *)
+ ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
+ p_rmw_hdr);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+static void qla4_83xx_pause(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ int index;
+ struct qla4_83xx_quad_entry *p_entry;
+ struct qla4_83xx_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla4_83xx_poll *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+ p_entry = (struct qla4_83xx_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
+ __func__, i,
+ ha->reset_tmplt.seq_index));
+ } else {
+ index = ha->reset_tmplt.array_index;
+ qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
+ &value);
+ ha->reset_tmplt.array[index++] = value;
+
+ if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
+ ha->reset_tmplt.array_index = 1;
+ }
+ }
+ }
+}
+
+static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ ha->reset_tmplt.seq_end = 1;
+}
+
+static void qla4_83xx_template_end(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ ha->reset_tmplt.template_end = 1;
+
+ if (ha->reset_tmplt.seq_error == 0) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Reset sequence completed SUCCESSFULLY.\n",
+ __func__));
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
+ __func__);
+ }
+}
+
+/**
+ * qla4_83xx_process_reset_template - Process reset template.
+ *
+ * Process all entries in the reset template until an entry with the
+ * SEQ_END opcode, which marks the end of reset template processing.
+ * Each entry starts with a reset entry header carrying the
+ * opcode/command, the size of the entry, the number of entries in the
+ * sub-sequence, and a delay in microsecs or a timeout in millisecs.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Common reset entry header.
+ **/
+static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
+ char *p_buff)
+{
+ int index, entries;
+ struct qla4_83xx_reset_entry_hdr *p_hdr;
+ char *p_entry = p_buff;
+
+ ha->reset_tmplt.seq_end = 0;
+ ha->reset_tmplt.template_end = 0;
+ entries = ha->reset_tmplt.hdr->entries;
+ index = ha->reset_tmplt.seq_index;
+
+ for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
+
+ p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
+ switch (p_hdr->cmd) {
+ case OPCODE_NOP:
+ break;
+ case OPCODE_WRITE_LIST:
+ qla4_83xx_write_list(ha, p_hdr);
+ break;
+ case OPCODE_READ_WRITE_LIST:
+ qla4_83xx_read_write_list(ha, p_hdr);
+ break;
+ case OPCODE_POLL_LIST:
+ qla4_83xx_poll_list(ha, p_hdr);
+ break;
+ case OPCODE_POLL_WRITE_LIST:
+ qla4_83xx_poll_write_list(ha, p_hdr);
+ break;
+ case OPCODE_READ_MODIFY_WRITE:
+ qla4_83xx_read_modify_write(ha, p_hdr);
+ break;
+ case OPCODE_SEQ_PAUSE:
+ qla4_83xx_pause(ha, p_hdr);
+ break;
+ case OPCODE_SEQ_END:
+ qla4_83xx_seq_end(ha, p_hdr);
+ break;
+ case OPCODE_TMPL_END:
+ qla4_83xx_template_end(ha, p_hdr);
+ break;
+ case OPCODE_POLL_READ_LIST:
+ qla4_83xx_poll_read_list(ha, p_hdr);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+
+ /* Set pointer to next entry in the sequence. */
+ p_entry += p_hdr->size;
+ }
+
+ ha->reset_tmplt.seq_index = index;
+}
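+/*
+ * Entry-walk sketch for the dispatcher above: every entry, whatever its
+ * opcode, starts with a qla4_83xx_reset_entry_hdr carrying cmd, size,
+ * count and delay, so the cursor always advances by p_hdr->size:
+ *
+ *	[hdr|payload][hdr|payload]...[hdr: OPCODE_SEQ_END]
+ */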
+
+static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
+{
+ ha->reset_tmplt.seq_index = 0;
+ qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
+
+ if (ha->reset_tmplt.seq_end != 1)
+ ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
+ __func__);
+}
+
+static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
+{
+ qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
+
+ if (ha->reset_tmplt.template_end != 1)
+ ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
+ __func__);
+}
+
+static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
+{
+ qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
+
+ if (ha->reset_tmplt.seq_end != 1)
+ ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
+ __func__);
+}
+
+static int qla4_83xx_restart(struct scsi_qla_host *ha)
+{
+ int ret_val = QLA_SUCCESS;
+
+ qla4_83xx_process_stop_seq(ha);
+
+ /* Collect minidump*/
+ if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
+ qla4_8xxx_get_minidump(ha);
+
+ qla4_83xx_process_init_seq(ha);
+
+ if (qla4_83xx_copy_bootloader(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ goto exit_restart;
+ }
+
+ qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
+ qla4_83xx_process_start_seq(ha);
+
+exit_restart:
+ return ret_val;
+}
+
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
+{
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla4_83xx_restart(ha);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
+ goto exit_start_fw;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
+ __func__));
+ }
+
+ ret_val = qla4_83xx_check_cmd_peg_status(ha);
+ if (ret_val == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
+ __func__);
+
+exit_start_fw:
+ return ret_val;
+}
+
+/*----------------------Interrupt Related functions ---------------------*/
+
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+ uint32_t mb_int, ret;
+
+ if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
+ qla4_8xxx_mbx_intr_disable(ha);
+
+ ret = readl(&ha->qla4_83xx_reg->mbox_int);
+ mb_int = ret & ~INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+}
+
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+ uint32_t mb_int;
+
+ qla4_8xxx_mbx_intr_enable(ha);
+ mb_int = INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+
+ set_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount)
+{
+ int i;
+
+ /* Load all mailbox registers, except mailbox 0. */
+ for (i = 1; i < incount; i++)
+ writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
+
+ writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
+
+ /* Set Host Interrupt register to 1, to tell the firmware that
+ * a mailbox command is pending. Firmware after reading the
+ * mailbox command, clears the host interrupt register */
+ writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
+}
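
For context, a minimal sketch of how a caller might pair these two hooks through isp_ops. The helper name and the bare polling loop are illustrative only; the driver's real mailbox path also takes locks and handles timeouts:

	/* Illustrative only -- not part of this patch. */
	static int example_issue_mbox(struct scsi_qla_host *ha,
				      uint32_t *mbx_cmd, int incount,
				      int outcount)
	{
		ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, incount);

		/* The ISR decodes the completion and sets AF_MBOX_COMMAND_DONE. */
		while (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags))
			ha->isp_ops->process_mailbox_interrupt(ha, outcount);

		return QLA_SUCCESS;
	}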
+
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
+{
+ int intr_status;
+
+ intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
+ if (intr_status) {
+ ha->mbox_status_count = outcount;
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+ }
+}
+
+/**
+ * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * @ha: pointer to host adapter structure.
+ **/
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
+{
+ int rval;
+ uint32_t dev_state;
+
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+
+ if (ql4xdontresethba)
+ qla4_83xx_set_idc_dontreset(ha);
+
+ if (dev_state == QLA8XXX_DEV_READY) {
+		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
+		 * recovery. */
+ if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
+ ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_isp_reset;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
+ __func__));
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+
+ } else {
+		/* If device_state is NEED_RESET, go ahead with the reset,
+		 * irrespective of ql4xdontresethba. This allows a
+		 * non-reset-owner to force a reset: it sets IDC_CTRL BIT0
+		 * to prevent the reset owner from resetting on its own,
+		 * and then forces a reset by setting device_state to
+		 * NEED_RESET. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: HW state already set to NEED_RESET\n",
+ __func__));
+ }
+
+	/* For ISP8324, the reset owner is NIC, iSCSI or FCoE, based on
+	 * priority and which drivers are present. Unlike ISP8022, the
+	 * function setting NEED_RESET may not be the reset owner. */
+ if (qla4_83xx_can_perform_reset(ha))
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+
+ ha->isp_ops->idc_unlock(ha);
+ rval = qla4_8xxx_device_state_handler(ha);
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_clear_rst_ready(ha);
+exit_isp_reset:
+ ha->isp_ops->idc_unlock(ha);
+
+ if (rval == QLA_SUCCESS)
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+
+ return rval;
+}
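
The DONTRESET_BIT0 helpers used above are only declared in ql4_glbl.h later in this patch; a plausible sketch of the read side, assuming it simply masks the QLA83XX_IDC_DRV_CTRL register (consistent with the == DONTRESET_BIT0 comparison above):

	/* Sketch only; not necessarily the patch's implementation. */
	int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
	{
		uint32_t idc_ctrl;

		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
		return idc_ctrl & DONTRESET_BIT0;
	}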
+
+static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
+{
+ u32 val = 0, val1 = 0;
+ int i, status = QLA_SUCCESS;
+
+ status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
+
+ /* Port 0 Rx Buffer Pause Threshold Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+ for (i = 0; i < 8; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 1 Rx Buffer Pause Threshold Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+ for (i = 0; i < 8; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 0 RxB Traffic Class Max Cell Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
+ for (i = 0; i < 4; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 1 RxB Traffic Class Max Cell Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
+ for (i = 0; i < 4; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 0 RxB Rx Traffic Class Stats. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
+ for (i = 7; i >= 0; i--) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_STATS,
+ &val);
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
+ (val | (i << 29)));
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_STATS,
+ &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 1 RxB Rx Traffic Class Stats. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
+ for (i = 7; i >= 0; i--) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_STATS,
+ &val);
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
+ (val | (i << 29)));
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_STATS,
+ &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+ &val);
+ status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+ &val1);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1));
+}
+
+static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+ int i;
+
+ /* set SRE-Shim Control Register */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
+ QLA83XX_SET_PAUSE_VAL);
+
+ for (i = 0; i < 8; i++) {
+ /* Port 0 Rx Buffer Pause Threshold Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
+ QLA83XX_SET_PAUSE_VAL);
+ /* Port 1 Rx Buffer Pause Threshold Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
+ QLA83XX_SET_PAUSE_VAL);
+ }
+
+ for (i = 0; i < 4; i++) {
+ /* Port 0 RxB Traffic Class Max Cell Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
+ QLA83XX_SET_TC_MAX_CELL_VAL);
+ /* Port 1 RxB Traffic Class Max Cell Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
+ QLA83XX_SET_TC_MAX_CELL_VAL);
+ }
+
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+ QLA83XX_SET_PAUSE_VAL);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+ QLA83XX_SET_PAUSE_VAL);
+
+ ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
+}
+
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+ ha->isp_ops->idc_lock(ha);
+ qla4_83xx_dump_pause_control_regs(ha);
+ __qla4_83xx_disable_pause(ha);
+ ha->isp_ops->idc_unlock(ha);
+}
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
new file mode 100644
index 000000000000..6a00f903f2a6
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -0,0 +1,283 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QL483XX_H
+#define __QL483XX_H
+
+/* Indirectly Mapped Registers */
+#define QLA83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLA83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLA83XX_FLASH_STATUS 0x42100004
+#define QLA83XX_FLASH_CONTROL 0x42110004
+#define QLA83XX_FLASH_ADDR 0x42110008
+#define QLA83XX_FLASH_WRDATA 0x4211000C
+#define QLA83XX_FLASH_RDDATA 0x42110018
+#define QLA83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLA83XX_FLASH_DIRECT_DATA(DATA)	(0x42150000 | (0x0000FFFF & (DATA)))
+
+/* Directly Mapped Registers in 83xx register table */
+
+/* Flash access regs */
+#define QLA83XX_FLASH_LOCK 0x3850
+#define QLA83XX_FLASH_UNLOCK 0x3854
+#define QLA83XX_FLASH_LOCK_ID 0x3500
+
+/* Driver Lock regs */
+#define QLA83XX_DRV_LOCK 0x3868
+#define QLA83XX_DRV_UNLOCK 0x386C
+#define QLA83XX_DRV_LOCK_ID 0x3504
+#define QLA83XX_DRV_LOCKRECOVERY 0x379C
+
+/* IDC version */
+#define QLA83XX_IDC_VER_MAJ_VALUE 0x1
+#define QLA83XX_IDC_VER_MIN_VALUE 0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA83XX_CRB_IDC_VER_MAJOR 0x3780
+#define QLA83XX_CRB_IDC_VER_MINOR 0x3798
+#define QLA83XX_IDC_DRV_CTRL 0x3790
+#define QLA83XX_IDC_DRV_AUDIT 0x3794
+#define QLA83XX_SRE_SHIM_CONTROL 0x0D200284
+#define QLA83XX_PORT0_RXB_PAUSE_THRS 0x0B2003A4
+#define QLA83XX_PORT1_RXB_PAUSE_THRS 0x0B2013A4
+#define QLA83XX_PORT0_RXB_TC_MAX_CELL 0x0B200388
+#define QLA83XX_PORT1_RXB_TC_MAX_CELL 0x0B201388
+#define QLA83XX_PORT0_RXB_TC_STATS 0x0B20039C
+#define QLA83XX_PORT1_RXB_TC_STATS 0x0B20139C
+#define QLA83XX_PORT2_IFB_PAUSE_THRS 0x0B200704
+#define QLA83XX_PORT3_IFB_PAUSE_THRS 0x0B201704
+
+/* Value used to program the pause threshold registers */
+#define QLA83XX_SET_PAUSE_VAL 0x0
+#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
+
+/* qla_83xx_reg_tbl registers */
+#define QLA83XX_PEG_HALT_STATUS1 0x34A8
+#define QLA83XX_PEG_HALT_STATUS2 0x34AC
+#define QLA83XX_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
+#define QLA83XX_FW_CAPABILITIES 0x3528
+#define QLA83XX_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
+#define QLA83XX_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
+#define QLA83XX_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
+#define QLA83XX_CRB_DRV_SCRATCH 0x3548
+#define QLA83XX_CRB_DEV_PART_INFO1 0x37E0
+#define QLA83XX_CRB_DEV_PART_INFO2 0x37E4
+
+#define QLA83XX_FW_VER_MAJOR 0x3550
+#define QLA83XX_FW_VER_MINOR 0x3554
+#define QLA83XX_FW_VER_SUB 0x3558
+#define QLA83XX_NPAR_STATE 0x359C
+#define QLA83XX_FW_IMAGE_VALID 0x35FC
+#define QLA83XX_CMDPEG_STATE 0x3650
+#define QLA83XX_ASIC_TEMP 0x37B4
+#define QLA83XX_FW_API 0x356C
+#define QLA83XX_DRV_OP_MODE 0x3570
+
+static const uint32_t qla4_83xx_reg_tbl[] = {
+ QLA83XX_PEG_HALT_STATUS1,
+ QLA83XX_PEG_HALT_STATUS2,
+ QLA83XX_PEG_ALIVE_COUNTER,
+ QLA83XX_CRB_DRV_ACTIVE,
+ QLA83XX_CRB_DEV_STATE,
+ QLA83XX_CRB_DRV_STATE,
+ QLA83XX_CRB_DRV_SCRATCH,
+ QLA83XX_CRB_DEV_PART_INFO1,
+ QLA83XX_CRB_IDC_VER_MAJOR,
+ QLA83XX_FW_VER_MAJOR,
+ QLA83XX_FW_VER_MINOR,
+ QLA83XX_FW_VER_SUB,
+ QLA83XX_CMDPEG_STATE,
+ QLA83XX_ASIC_TEMP,
+};
+
+#define QLA83XX_CRB_WIN_BASE 0x3800
+#define QLA83XX_CRB_WIN_FUNC(f) (QLA83XX_CRB_WIN_BASE+((f)*4))
+#define QLA83XX_SEM_LOCK_BASE 0x3840
+#define QLA83XX_SEM_UNLOCK_BASE 0x3844
+#define QLA83XX_SEM_LOCK_FUNC(f) (QLA83XX_SEM_LOCK_BASE+((f)*8))
+#define QLA83XX_SEM_UNLOCK_FUNC(f) (QLA83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLA83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLA83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLA83XX_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
+#define QLA83XX_LINK_SPEED_FACTOR 10
+
+/* FLASH API Defines */
+#define QLA83xx_FLASH_MAX_WAIT_USEC 100
+#define QLA83XX_FLASH_LOCK_TIMEOUT 10000
+#define QLA83XX_FLASH_SECTOR_SIZE 65536
+#define QLA83XX_DRV_LOCK_TIMEOUT 2000
+#define QLA83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLA83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLA83XX_FLASH_BUFFER_WRITE_CMD 0xcadcadca
+#define QLA83XX_FLASH_READ_RETRY_COUNT 2000
+#define QLA83XX_FLASH_STATUS_READY 0x6
+#define QLA83XX_FLASH_BUFFER_WRITE_MIN 2
+#define QLA83XX_FLASH_BUFFER_WRITE_MAX 64
+#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA83XX_ERASE_MODE 1
+#define QLA83XX_WRITE_MODE 2
+#define QLA83XX_DWORD_WRITE_MODE 3
+
+#define QLA83XX_GLOBAL_RESET 0x38CC
+#define QLA83XX_WILDCARD 0x38F0
+#define QLA83XX_INFORMANT 0x38FC
+#define QLA83XX_HOST_MBX_CTRL 0x3038
+#define QLA83XX_FW_MBX_CTRL 0x303C
+#define QLA83XX_BOOTLOADER_ADDR 0x355C
+#define QLA83XX_BOOTLOADER_SIZE 0x3560
+#define QLA83XX_FW_IMAGE_ADDR 0x3564
+#define QLA83XX_MBX_INTR_ENABLE 0x1000
+#define QLA83XX_MBX_INTR_MASK 0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0 0x1
+#define GRACEFUL_RESET_BIT1 0x2
+
+#define QLA83XX_HALT_STATUS_INFORMATIONAL (0x1 << 29)
+#define QLA83XX_HALT_STATUS_FW_RESET (0x2 << 29)
+#define QLA83XX_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLA83XX_BOOT_FROM_FLASH 0
+
+#define QLA83XX_IDC_PARAM_ADDR 0x3e8020
+/* Reset template definitions */
+#define QLA83XX_MAX_RESET_SEQ_ENTRIES 16
+#define QLA83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLA83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLA83XX_RESET_SEQ_VERSION 0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP 0x0000
+#define OPCODE_WRITE_LIST 0x0001
+#define OPCODE_READ_WRITE_LIST 0x0002
+#define OPCODE_POLL_LIST 0x0004
+#define OPCODE_POLL_WRITE_LIST 0x0008
+#define OPCODE_READ_MODIFY_WRITE 0x0010
+#define OPCODE_SEQ_PAUSE 0x0020
+#define OPCODE_SEQ_END 0x0040
+#define OPCODE_TMPL_END 0x0080
+#define OPCODE_POLL_READ_LIST 0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
+struct qla4_83xx_reset_template_hdr {
+ __le16 version;
+ __le16 signature;
+ __le16 size;
+ __le16 entries;
+ __le16 hdr_size;
+ __le16 checksum;
+ __le16 init_seq_offset;
+ __le16 start_seq_offset;
+} __packed;
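
The checksum field implies the template can be validated before it is executed; a hedged sketch, assuming (as in other QLogic reset templates) that the 16-bit words of a valid template sum to zero with end-around carry:

	/* Illustrative helper; name and exact scheme are assumptions. */
	static uint16_t example_tmplt_checksum(void *buff, uint32_t len)
	{
		uint32_t i, sum = 0;
		uint16_t *buf = (uint16_t *)buff;

		for (i = 0; i < len / 2; i++)
			sum += *buf++;

		while (sum >> 16)	/* fold carries back in */
			sum = (sum & 0xFFFF) + (sum >> 16);

		return (uint16_t)sum;	/* 0 for a valid template */
	}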
+
+/* Common Entry Header. */
+struct qla4_83xx_reset_entry_hdr {
+ __le16 cmd;
+ __le16 size;
+ __le16 count;
+ __le16 delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla4_83xx_poll {
+ __le32 test_mask;
+ __le32 test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla4_83xx_rmw {
+ __le32 test_mask;
+ __le32 xor_value;
+ __le32 or_value;
+ uint8_t shl;
+ uint8_t shr;
+ uint8_t index_a;
+ uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla4_83xx_entry {
+ __le32 arg1;
+ __le32 arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords. */
+struct qla4_83xx_quad_entry {
+ __le32 dr_addr;
+ __le32 dr_value;
+ __le32 ar_addr;
+ __le32 ar_value;
+} __packed;
+
+struct qla4_83xx_reset_template {
+ int seq_index;
+ int seq_error;
+ int array_index;
+ uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES];
+ uint8_t *buff;
+ uint8_t *stop_offset;
+ uint8_t *start_offset;
+ uint8_t *init_offset;
+ struct qla4_83xx_reset_template_hdr *hdr;
+ uint8_t seq_end;
+ uint8_t template_end;
+};
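
A sketch of how the three sub-sequence pointers might relate to the header once the template is read from flash, assuming the stop sequence starts immediately after the header while init/start come from the header's explicit offsets:

	/* Illustrative only; 'tmplt' is &ha->reset_tmplt after buff/hdr are set. */
	tmplt->stop_offset  = tmplt->buff + le16_to_cpu(tmplt->hdr->hdr_size);
	tmplt->init_offset  = tmplt->buff + le16_to_cpu(tmplt->hdr->init_seq_offset);
	tmplt->start_offset = tmplt->buff + le16_to_cpu(tmplt->hdr->start_seq_offset);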
+
+/* POLLRD Entry */
+struct qla83xx_minidump_entry_pollrd {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t read_addr;
+ uint32_t select_value;
+ uint16_t select_value_stride;
+ uint16_t op_count;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t data_size;
+ uint32_t rsvd_1;
+};
+
+/* RDMUX2 Entry */
+struct qla83xx_minidump_entry_rdmux2 {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t select_addr_1;
+ uint32_t select_addr_2;
+ uint32_t select_value_1;
+ uint32_t select_value_2;
+ uint32_t op_count;
+ uint32_t select_value_mask;
+ uint32_t read_addr;
+ uint8_t select_value_stride;
+ uint8_t data_size;
+ uint8_t rsvd[2];
+};
+
+/* POLLRDMWR Entry */
+struct qla83xx_minidump_entry_pollrdmwr {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+};
+
+/* IDC additional information */
+struct qla4_83xx_idc_information {
+ uint32_t request_desc; /* IDC request descriptor */
+ uint32_t info1; /* IDC additional info */
+ uint32_t info2; /* IDC additional info */
+ uint32_t info3; /* IDC additional info */
+};
+
+#endif
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index c681b2a355e1..76819b71ada7 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -17,7 +17,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha))
return -EINVAL;
if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
@@ -38,7 +38,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
long reading;
int ret = 0;
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha))
return -EINVAL;
if (off != 0)
@@ -75,21 +75,21 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
break;
case 2:
/* Reset HBA */
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_READY) {
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+ if (dev_state == QLA8XXX_DEV_READY) {
ql4_printk(KERN_INFO, ha,
"%s: Setting Need reset, reset_owner is 0x%x.\n",
__func__, ha->func_num);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_NEED_RESET);
- set_bit(AF_82XX_RST_OWNER, &ha->flags);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
} else
ql4_printk(KERN_INFO, ha,
"%s: Reset not performed as device state is 0x%x\n",
__func__, dev_state);
- qla4_8xxx_idc_unlock(ha);
+ ha->isp_ops->idc_unlock(ha);
break;
default:
/* do nothing */
@@ -150,7 +150,7 @@ qla4xxx_fw_version_show(struct device *dev,
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
ha->firmware_version[0],
ha->firmware_version[1],
@@ -214,7 +214,7 @@ qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha))
return -ENOSYS;
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
@@ -226,7 +226,7 @@ qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha))
return -ENOSYS;
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
@@ -238,7 +238,7 @@ qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha))
return -ENOSYS;
return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 8d58ae274829..77b7c594010f 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -37,7 +37,7 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
for (i = 1; i < MBOX_REG_COUNT; i++)
printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
- i, readl(&ha->qla4_8xxx_reg->mailbox_in[i]));
+ i, readl(&ha->qla4_82xx_reg->mailbox_in[i]));
return;
}
@@ -131,3 +131,31 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
&ha->reg->ctrl_status);
}
}
+
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
+{
+ uint32_t halt_status1, halt_status2;
+
+ halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+ halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2);
+
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n"
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ " PEG_NET_4_PC: 0x%x\n", ha->host_no,
+ __func__, halt_status1, halt_status2,
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ } else if (is_qla8032(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n"
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
+ ha->host_no, __func__, halt_status1, halt_status2);
+ }
+}
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index abd83602cdda..5b0afc18ef18 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7fdba7f1ffb7..329d553eae94 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -42,6 +42,7 @@
#include "ql4_nx.h"
#include "ql4_fw.h"
#include "ql4_nvram.h"
+#include "ql4_83xx.h"
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
@@ -59,6 +60,10 @@
#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022
#endif
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8324
+#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032
+#endif
+
#define ISP4XXX_PCI_FN_1 0x1
#define ISP4XXX_PCI_FN_2 0x3
@@ -388,8 +393,10 @@ struct isp_operations {
void (*disable_intrs) (struct scsi_qla_host *);
void (*enable_intrs) (struct scsi_qla_host *);
int (*start_firmware) (struct scsi_qla_host *);
+ int (*restart_firmware) (struct scsi_qla_host *);
irqreturn_t (*intr_handler) (int , void *);
void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t);
+ int (*need_reset) (struct scsi_qla_host *);
int (*reset_chip) (struct scsi_qla_host *);
int (*reset_firmware) (struct scsi_qla_host *);
void (*queue_iocb) (struct scsi_qla_host *);
@@ -397,6 +404,15 @@ struct isp_operations {
uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *);
uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *);
int (*get_sys_info) (struct scsi_qla_host *);
+ uint32_t (*rd_reg_direct) (struct scsi_qla_host *, ulong);
+ void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
+ int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
+ int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
+ int (*idc_lock) (struct scsi_qla_host *);
+ void (*idc_unlock) (struct scsi_qla_host *);
+ void (*rom_lock_recovery) (struct scsi_qla_host *);
+ void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
+ void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
};
struct ql4_mdump_size_table {
@@ -497,8 +513,9 @@ struct scsi_qla_host {
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
-#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
+#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
+#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
unsigned long dpc_flags;
@@ -514,7 +531,7 @@ struct scsi_qla_host {
#define DPC_RESET_ACTIVE 20 /* 0x00040000 */
#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
-
+#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
@@ -647,7 +664,7 @@ struct scsi_qla_host {
uint8_t acb_version;
/* qla82xx specific fields */
- struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */
+ struct device_reg_82xx __iomem *qla4_82xx_reg; /* Base I/O address */
unsigned long nx_pcibase; /* Base I/O address */
uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */
unsigned long nx_db_wr_ptr; /* Door bell write pointer */
@@ -733,6 +750,13 @@ struct scsi_qla_host {
#define MAX_MRB 128
struct mrb *active_mrb_array[MAX_MRB];
uint32_t mrb_index;
+
+ uint32_t *reg_tbl;
+ struct qla4_83xx_reset_template reset_tmplt;
+ struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address
+ for ISP8324 */
+ uint32_t pf_bit;
+ struct qla4_83xx_idc_information idc_info;
};
struct ql4_task_data {
@@ -752,7 +776,7 @@ struct ql4_task_data {
struct qla_endpoint {
struct Scsi_Host *host;
- struct sockaddr dst_addr;
+ struct sockaddr_storage dst_addr;
};
struct qla_conn {
@@ -795,13 +819,20 @@ static inline int is_qla8022(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
}
-/* Note: Currently AER/EEH is now supported only for 8022 cards
- * This function needs to be updated when AER/EEH is enabled
- * for other cards.
- */
+static inline int is_qla8032(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
+}
+
+static inline int is_qla80XX(struct scsi_qla_host *ha)
+{
+ return is_qla8022(ha) || is_qla8032(ha);
+}
+
static inline int is_aer_supported(struct scsi_qla_host *ha)
{
- return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
+ return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
+ (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324));
}
static inline int adapter_up(struct scsi_qla_host *ha)
@@ -942,6 +973,20 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
}
+
+static inline int qla4_8xxx_rd_direct(struct scsi_qla_host *ha,
+ const uint32_t crb_reg)
+{
+ return ha->isp_ops->rd_reg_direct(ha, ha->reg_tbl[crb_reg]);
+}
+
+static inline void qla4_8xxx_wr_direct(struct scsi_qla_host *ha,
+ const uint32_t crb_reg,
+ const uint32_t value)
+{
+ ha->isp_ops->wr_reg_direct(ha, ha->reg_tbl[crb_reg], value);
+}
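
Callers pass the generic QLA8XXX_* index, which reg_tbl maps to the chip-specific CRB offset; for example (mirroring qla4_83xx_isp_reset earlier in this patch):

	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET);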
+
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 7240948fb929..1c4795020357 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -65,6 +65,40 @@ struct device_reg_82xx {
#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. */
};
+/* ISP 83xx I/O Register Set structure */
+struct device_reg_83xx {
+ __le32 mailbox_in[16]; /* 0x0000 */
+ __le32 reserve1[496]; /* 0x0040 */
+ __le32 mailbox_out[16]; /* 0x0800 */
+ __le32 reserve2[496];
+ __le32 mbox_int; /* 0x1000 */
+ __le32 reserve3[63];
+ __le32 req_q_out; /* 0x1100 */
+ __le32 reserve4[63];
+
+ __le32 rsp_q_in; /* 0x1200 */
+ __le32 reserve5[1919];
+
+ __le32 req_q_in; /* 0x3000 */
+ __le32 reserve6[3];
+ __le32 iocb_int_mask; /* 0x3010 */
+ __le32 reserve7[3];
+ __le32 rsp_q_out; /* 0x3020 */
+ __le32 reserve8[3];
+ __le32 anonymousbuff; /* 0x3030 */
+ __le32 mb_int_mask; /* 0x3034 */
+
+ __le32 host_intr; /* 0x3038 - Host Interrupt Register */
+ __le32 risc_intr; /* 0x303C - RISC Interrupt Register */
+ __le32 reserve9[544];
+ __le32 leg_int_ptr; /* 0x38C0 - Legacy Interrupt Pointer Register */
+ __le32 leg_int_trig; /* 0x38C4 - Legacy Interrupt Trigger Control */
+ __le32 leg_int_mask; /* 0x38C8 - Legacy Interrupt Mask Register */
+};
+
+#define INT_ENABLE_FW_MB (1 << 2)
+#define INT_MASK_FW_MB (1 << 2)
+
/* remote register set (access via PCI memory read/write) */
struct isp_reg {
#define MBOX_REG_COUNT 8
@@ -356,6 +390,9 @@ struct qla_flt_region {
#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
#define LOGOUT_OPTION_RELOGIN 0x0004
#define LOGOUT_OPTION_FREE_DDB 0x0008
+#define MBOX_CMD_SET_PARAM 0x0059
+#define SET_DRVR_VERSION 0x200
+#define MAX_DRVR_VER_LEN 24
#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
@@ -417,6 +454,10 @@ struct qla_flt_region {
#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
+#define MBOX_CMD_IDC_ACK 0x0101
+#define MBOX_CMD_PORT_RESET 0x0120
+#define MBOX_CMD_SET_PORT_CONFIG 0x0122
+
/* Mailbox status definitions */
#define MBOX_COMPLETION_STATUS 4
#define MBOX_STS_BUSY 0x0007
@@ -453,6 +494,8 @@ struct qla_flt_region {
#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
+#define MBOX_ASTS_IDC_COMPLETE 0x8100
+#define MBOX_ASTS_IDC_NOTIFY 0x8101
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -1195,9 +1238,12 @@ struct ql_iscsi_stats {
uint8_t reserved2[264]; /* 0x0308 - 0x040F */
};
-#define QLA82XX_DBG_STATE_ARRAY_LEN 16
-#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
-#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
+#define QLA8XXX_DBG_STATE_ARRAY_LEN 16
+#define QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA8XXX_DBG_RSVD_ARRAY_LEN 8
+#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16
+#define QLA83XX_SS_OCM_WNDREG_INDEX 3
+#define QLA83XX_SS_PCI_INDEX 0
struct qla4_8xxx_minidump_template_hdr {
uint32_t entry_type;
@@ -1214,8 +1260,9 @@ struct qla4_8xxx_minidump_template_hdr {
uint32_t driver_info_word3;
uint32_t driver_info_word4;
- uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
- uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+ uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
+ uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
};
#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5b2525c4139e..57a5a3cf5770 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -109,28 +109,28 @@ uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
void qla4_8xxx_pci_config(struct scsi_qla_host *);
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
int qla4_8xxx_load_risc(struct scsi_qla_host *);
-irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id);
-void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha);
-void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha);
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
-int qla4_8xxx_crb_win_lock(struct scsi_qla_host *);
-void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *);
-int qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
-void qla4_8xxx_wr_32(struct scsi_qla_host *, ulong, u32);
-int qla4_8xxx_rd_32(struct scsi_qla_host *, ulong);
-int qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
-int qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
-int qla4_8xxx_isp_reset(struct scsi_qla_host *ha);
-void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
+int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
+void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong);
+int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
+int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
+int qla4_82xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
-uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
-uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
-void qla4_8xxx_enable_intrs(struct scsi_qla_host *ha);
-void qla4_8xxx_disable_intrs(struct scsi_qla_host *ha);
+void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
+void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
@@ -138,8 +138,8 @@ irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
-int qla4_8xxx_idc_lock(struct scsi_qla_host *ha);
-void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha);
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha);
int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
@@ -203,6 +203,62 @@ int qla4xxx_req_template_size(struct scsi_qla_host *ha);
void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha);
+int qla4_8xxx_need_reset(struct scsi_qla_host *ha);
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data);
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data);
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount);
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount);
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha);
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha);
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id);
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+ uint32_t intr_status);
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
+uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t *data);
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t data);
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha);
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha);
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount);
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha);
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+ uint32_t flash_addr, uint8_t *p_data,
+ int u32_word_count);
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha);
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha);
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+ uint8_t *p_data, int u32_word_count);
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha);
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
+int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index ddd9472066cb..1aca1b4f70b8 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -102,11 +102,18 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
writel(0,
- (unsigned long __iomem *)&ha->qla4_8xxx_reg->req_q_out);
+ (unsigned long __iomem *)&ha->qla4_82xx_reg->req_q_out);
writel(0,
- (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_in);
+ (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
writel(0,
- (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_out);
+ (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
+ } else if (is_qla8032(ha)) {
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in);
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out);
} else {
/*
* Initialize DMA Shadow registers. The firmware is really
@@ -524,7 +531,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
/* For 82xx, stop firmware before initializing because if BIOS
* has previously initialized firmware, then driver's initialize
* firmware will fail. */
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
qla4_8xxx_stop_firmware(ha);
ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
@@ -537,7 +544,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
if (!qla4xxx_fw_ready(ha))
return status;
- if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+ if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
qla4xxx_alloc_fw_dump(ha);
return qla4xxx_get_firmware_status(ha);
@@ -946,9 +953,9 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
- if (is_qla8022(ha) && (status == QLA_ERROR)) {
+ if (is_qla80XX(ha) && (status == QLA_ERROR)) {
/* Since interrupts are registered in start_firmware for
- * 82xx, release them here if initialize_adapter fails */
+ * 80XX, release them here if initialize_adapter fails */
qla4xxx_free_irqs(ha);
}
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 62f90bdec5d5..6f4decd44c6a 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 2a2022a6bb9b..f48f37a281d1 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -192,35 +192,47 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
}
}
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
+{
+ writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
+ readl(&ha->qla4_83xx_reg->req_q_in);
+}
+
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
+{
+ writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
+ readl(&ha->qla4_83xx_reg->rsp_q_out);
+}
+
/**
- * qla4_8xxx_queue_iocb - Tell ISP it's got new request(s)
+ * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
* @ha: pointer to host adapter structure.
*
* This routine notifies the ISP that one or more new request
* queue entries have been placed on the request queue.
**/
-void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
uint32_t dbval = 0;
dbval = 0x14 | (ha->func_num << 5);
dbval = dbval | (0 << 8) | (ha->request_in << 16);
- qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
+ qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}
/**
- * qla4_8xxx_complete_iocb - Tell ISP we're done with response(s)
+ * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
* @ha: pointer to host adapter structure.
*
* This routine notifies the ISP that one or more response/completion
* queue entries have been processed by the driver.
* This also clears the interrupt.
**/
-void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha)
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
- writel(ha->response_out, &ha->qla4_8xxx_reg->rsp_q_out);
- readl(&ha->qla4_8xxx_reg->rsp_q_out);
+ writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
+ readl(&ha->qla4_82xx_reg->rsp_q_out);
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index fc542a9bb106..15ea81465ce4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -126,7 +126,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
"handle=0x%0x, srb=%p\n", __func__,
sts_entry->handle, srb);
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
else
set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -243,56 +243,72 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
scsi_set_resid(cmd, residual);
- /*
- * If there is scsi_status, it takes precedense over
- * underflow condition.
- */
- if (scsi_status != 0) {
- cmd->result = DID_OK << 16 | scsi_status;
+ if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
+
+ /* Both the firmware and target reported UNDERRUN:
+ *
+ * MID-LAYER UNDERFLOW case:
+ * Some kernels do not properly detect midlayer
+ * underflow, so we manually check it and return
+ * ERROR if the minimum required data was not
+ * received.
+ *
+ * ALL OTHER cases:
+ * Fall thru to check scsi_status
+ */
+ if (!scsi_status && (scsi_bufflen(cmd) - residual) <
+ cmd->underflow) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
+ ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ scsi_bufflen(cmd),
+ residual));
- if (scsi_status != SCSI_CHECK_CONDITION)
+ cmd->result = DID_ERROR << 16;
break;
+ }
+
+ } else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
+ scsi_status != SAM_STAT_BUSY) {
- /* Copy Sense Data into sense buffer. */
- qla4xxx_copy_sense(ha, sts_entry, srb);
- } else {
/*
- * If RISC reports underrun and target does not
- * report it then we must have a lost frame, so
- * tell upper layer to retry it by reporting a
- * bus busy.
+ * The firmware reports UNDERRUN, but the target does
+ * not report it:
+ *
+ * scsi_status | host_byte device_byte
+ * | (19:16) (7:0)
+ * ============= | ========= ===========
+ * TASK_SET_FULL | DID_OK scsi_status
+ * BUSY | DID_OK scsi_status
+ * ALL OTHERS | DID_ERROR scsi_status
+ *
+ * Note: If scsi_status is task set full or busy,
+ * then this else if would fall thru to check the
+ * scsi_status and return DID_OK.
*/
- if ((sts_entry->iscsiFlags &
- ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
- cmd->result = DID_BUS_BUSY << 16;
- } else if ((scsi_bufflen(cmd) - residual) <
- cmd->underflow) {
- /*
- * Handle mid-layer underflow???
- *
- * For kernels less than 2.4, the driver must
- * return an error if an underflow is detected.
- * For kernels equal-to and above 2.4, the
- * mid-layer will appearantly handle the
- * underflow by detecting the residual count --
- * unfortunately, we do not see where this is
- * actually being done. In the interim, we
- * will return DID_ERROR.
- */
- DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
- "Mid-layer Data underrun1, "
- "xferlen = 0x%x, "
- "residual = 0x%x\n", ha->host_no,
- cmd->device->channel,
- cmd->device->id,
- cmd->device->lun, __func__,
- scsi_bufflen(cmd), residual));
- cmd->result = DID_ERROR << 16;
- } else {
- cmd->result = DID_OK << 16;
- }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ residual,
+ scsi_bufflen(cmd)));
+
+ cmd->result = DID_ERROR << 16 | scsi_status;
+ goto check_scsi_status;
}
+
+ cmd->result = DID_OK << 16 | scsi_status;
+
+check_scsi_status:
+ if (scsi_status == SAM_STAT_CHECK_CONDITION)
+ qla4xxx_copy_sense(ha, sts_entry, srb);
+
break;
case SCS_DEVICE_LOGGED_OUT:
@@ -578,6 +594,14 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
{
int i;
uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+ __le32 __iomem *mailbox_out;
+
+ if (is_qla8032(ha))
+ mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
+ else if (is_qla8022(ha))
+ mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
+ else
+ mailbox_out = &ha->reg->mailbox[0];
if ((mbox_status == MBOX_STS_BUSY) ||
(mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -590,9 +614,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
* location and set mailbox command done flag
*/
for (i = 0; i < ha->mbox_status_count; i++)
- ha->mbox_status[i] = is_qla8022(ha)
- ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
- : readl(&ha->reg->mailbox[i]);
+ ha->mbox_status[i] = readl(&mailbox_out[i]);
set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
@@ -601,9 +623,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
- mbox_sts[i] = is_qla8022(ha)
- ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
- : readl(&ha->reg->mailbox[i]);
+ mbox_sts[i] = readl(&mailbox_out[i]);
/* Immediately process the AENs that don't require much work.
* Only queue the database_changed AENs */
@@ -619,7 +639,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
qla4xxx_dump_registers(ha);
- if (ql4xdontresethba) {
+ if ((is_qla8022(ha) && ql4xdontresethba) ||
+ (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
ha->host_no, __func__));
} else {
@@ -635,7 +656,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_DHCP_LEASE_EXPIRED:
DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
"Reset HA\n", ha->host_no, mbox_status));
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
else
@@ -700,7 +721,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
(mbox_sts[2] == ACB_STATE_VALID)) {
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
else
@@ -785,6 +806,43 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
" removed\n", ha->host_no, mbox_sts[0]));
break;
+ case MBOX_ASTS_IDC_NOTIFY:
+ {
+ uint32_t opcode;
+ if (is_qla8032(ha)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+ ha->host_no, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4]));
+ opcode = mbox_sts[1] >> 16;
+ if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
+ (opcode == MBOX_CMD_PORT_RESET)) {
+ set_bit(DPC_POST_IDC_ACK,
+ &ha->dpc_flags);
+ ha->idc_info.request_desc = mbox_sts[1];
+ ha->idc_info.info1 = mbox_sts[2];
+ ha->idc_info.info2 = mbox_sts[3];
+ ha->idc_info.info3 = mbox_sts[4];
+ qla4xxx_wake_dpc(ha);
+ }
+ }
+ break;
+ }
+
+ case MBOX_ASTS_IDC_COMPLETE:
+ if (is_qla8032(ha)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+ ha->host_no, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi:%ld: AEN %04x IDC Complete notification\n",
+ ha->host_no, mbox_sts[0]));
+ }
+ break;
+
default:
DEBUG2(printk(KERN_WARNING
"scsi%ld: AEN %04x UNKNOWN\n",
@@ -799,14 +857,31 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
}
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+ uint32_t intr_status)
+{
+	/* Process mailbox/asynch event interrupt. */
+ if (intr_status) {
+ qla4xxx_isr_decode_mailbox(ha,
+ readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+ /* clear the interrupt */
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ } else {
+ qla4xxx_process_response_queue(ha);
+ }
+
+ /* clear the interrupt */
+ writel(0, &ha->qla4_83xx_reg->mb_int_mask);
+}
+
/**
- * qla4_8xxx_interrupt_service_routine - isr
+ * qla4_82xx_interrupt_service_routine - isr
* @ha: pointer to host adapter structure.
*
* This is the main interrupt service routine.
* hardware_lock locked upon entry. runs in interrupt context.
**/
-void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status)
{
/* Process response queue interrupt. */
@@ -816,11 +891,11 @@ void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
/* Process mailbox/asynch event interrupt.*/
if (intr_status & HSRX_RISC_MB_INT)
qla4xxx_isr_decode_mailbox(ha,
- readl(&ha->qla4_8xxx_reg->mailbox_out[0]));
+ readl(&ha->qla4_82xx_reg->mailbox_out[0]));
/* clear the interrupt */
- writel(0, &ha->qla4_8xxx_reg->host_int);
- readl(&ha->qla4_8xxx_reg->host_int);
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ readl(&ha->qla4_82xx_reg->host_int);
}
/**
@@ -850,12 +925,12 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
}
/**
- * qla4_8xxx_spurious_interrupt - processes spurious interrupt
+ * qla4_82xx_spurious_interrupt - processes spurious interrupt
* @ha: pointer to host adapter structure.
* @reqs_count: .
*
**/
-static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
+static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
uint8_t reqs_count)
{
if (reqs_count)
@@ -863,9 +938,9 @@ static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
if (is_qla8022(ha)) {
- writel(0, &ha->qla4_8xxx_reg->host_int);
+ writel(0, &ha->qla4_82xx_reg->host_int);
if (test_bit(AF_INTx_ENABLED, &ha->flags))
- qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
ha->spurious_int_count++;
@@ -968,11 +1043,11 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
}
/**
- * qla4_8xxx_intr_handler - hardware interrupt handler.
+ * qla4_82xx_intr_handler - hardware interrupt handler.
* @irq: Unused
* @dev_id: Pointer to host adapter structure
**/
-irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
{
struct scsi_qla_host *ha = dev_id;
uint32_t intr_status;
@@ -984,11 +1059,11 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
ha->isr_count++;
- status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+ status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
if (!(status & ha->nx_legacy_intr.int_vec_bit))
return IRQ_NONE;
- status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
+ status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s legacy Int not triggered\n", __func__));
@@ -996,30 +1071,30 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
}
/* clear the interrupt */
- qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
/* read twice to ensure write is flushed */
- qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
- qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
spin_lock_irqsave(&ha->hardware_lock, flags);
while (1) {
- if (!(readl(&ha->qla4_8xxx_reg->host_int) &
+ if (!(readl(&ha->qla4_82xx_reg->host_int) &
ISRX_82XX_RISC_INT)) {
- qla4_8xxx_spurious_interrupt(ha, reqs_count);
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
break;
}
- intr_status = readl(&ha->qla4_8xxx_reg->host_status);
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
if ((intr_status &
(HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
- qla4_8xxx_spurious_interrupt(ha, reqs_count);
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
break;
}
ha->isp_ops->interrupt_service_routine(ha, intr_status);
/* Enable Interrupt */
- qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
break;
@@ -1029,6 +1104,59 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#define LEG_INT_PTR_B31 (1 << 31)
+#define LEG_INT_PTR_B30 (1 << 30)
+#define PF_BITS_MASK (0xF << 16)
+
+/**
+ * qla4_83xx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha = dev_id;
+ uint32_t leg_int_ptr = 0;
+ unsigned long flags = 0;
+
+ ha->isr_count++;
+ leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+
+ /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+ if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+ __func__);
+ return IRQ_NONE;
+ }
+
+ /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+ if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+ __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
+ return IRQ_NONE;
+ }
+
+	/* To de-assert the legacy interrupt, write 0 to the Legacy Interrupt
+	 * Trigger Control register and poll until bit 30 of the Legacy
+	 * Interrupt Pointer register is 0.
+	 */
+ writel(0, &ha->qla4_83xx_reg->leg_int_trig);
+ do {
+ leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+ if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
+ break;
+ } while (leg_int_ptr & LEG_INT_PTR_B30);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
+ ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
@@ -1043,15 +1171,46 @@ qla4_8xxx_msi_handler(int irq, void *dev_id)
ha->isr_count++;
/* clear the interrupt */
- qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
/* read twice to ensure write is flushed */
- qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
- qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
return qla4_8xxx_default_intr_handler(irq, dev_id);
}
+static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha = dev_id;
+ unsigned long flags;
+ uint32_t ival = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ ival = readl(&ha->qla4_83xx_reg->risc_intr);
+ if (ival == 0) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: It is a spurious mailbox interrupt!\n",
+ __func__);
+ ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+ ival &= ~INT_MASK_FW_MB;
+ writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+ goto exit;
+ }
+
+ qla4xxx_isr_decode_mailbox(ha,
+ readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+ ival &= ~INT_MASK_FW_MB;
+ writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+ ha->isr_count++;
+exit:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return IRQ_HANDLED;
+}
+
/**
* qla4_8xxx_default_intr_handler - hardware interrupt handler.
* @irq: Unused
@@ -1068,29 +1227,32 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id)
uint32_t intr_status;
uint8_t reqs_count = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- while (1) {
- if (!(readl(&ha->qla4_8xxx_reg->host_int) &
- ISRX_82XX_RISC_INT)) {
- qla4_8xxx_spurious_interrupt(ha, reqs_count);
- break;
- }
+ if (is_qla8032(ha)) {
+ qla4_83xx_mailbox_intr_handler(irq, dev_id);
+ } else {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (1) {
+ if (!(readl(&ha->qla4_82xx_reg->host_int) &
+ ISRX_82XX_RISC_INT)) {
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
+ break;
+ }
- intr_status = readl(&ha->qla4_8xxx_reg->host_status);
- if ((intr_status &
- (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
- qla4_8xxx_spurious_interrupt(ha, reqs_count);
- break;
- }
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ if ((intr_status &
+ (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
+ break;
+ }
- ha->isp_ops->interrupt_service_routine(ha, intr_status);
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
- if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
- break;
+ if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+ break;
+ }
+ ha->isr_count++;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-
- ha->isr_count++;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
@@ -1099,13 +1261,25 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
struct scsi_qla_host *ha = dev_id;
unsigned long flags;
+ uint32_t ival = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- qla4xxx_process_response_queue(ha);
- writel(0, &ha->qla4_8xxx_reg->host_int);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
+ if (is_qla8032(ha)) {
+ ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
+ if (ival == 0) {
+ ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
+ __func__);
+ goto exit_msix_rsp_q;
+ }
+ qla4xxx_process_response_queue(ha);
+ writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
+ } else {
+ qla4xxx_process_response_queue(ha);
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ }
ha->isr_count++;
+exit_msix_rsp_q:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
@@ -1177,11 +1351,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
int ret;
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha))
goto try_intx;
- if (ql4xenablemsix == 2)
+ if (ql4xenablemsix == 2) {
+ /* Note: MSI Interrupts not supported for ISP8324 */
+ if (is_qla8032(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n",
+ __func__);
+ goto try_intx;
+ }
goto try_msi;
+ }
if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
goto try_intx;
@@ -1192,6 +1373,12 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_INFO, ha,
"MSI-X: Enabled (0x%X).\n", ha->revision_id));
goto irq_attached;
+ } else {
+ if (is_qla8032(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx mode. ret = %d\n",
+ __func__, ret);
+ goto try_intx;
+ }
}
ql4_printk(KERN_WARNING, ha,
@@ -1214,9 +1401,15 @@ try_msi:
pci_disable_msi(ha->pdev);
}
}
- ql4_printk(KERN_WARNING, ha,
- "MSI: Falling back-to INTx mode -- %d.\n", ret);
+	/*
+	 * For ISP8022, do not fall back to INTx mode if the interrupt
+	 * cannot be acquired through MSI-X or MSI mode.
+	 */
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
+ goto irq_not_attached;
+ }
try_intx:
/* Trying INTx */
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -1230,7 +1423,7 @@ try_intx:
ql4_printk(KERN_WARNING, ha,
"INTx: Failed to reserve interrupt %d already in"
" use.\n", ha->pdev->irq);
- return ret;
+ goto irq_not_attached;
}
irq_attached:
@@ -1238,6 +1431,7 @@ irq_attached:
ha->host->irq = ha->pdev->irq;
ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
__func__, ha->pdev->irq);
+irq_not_attached:
return ret;
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index cab8f665a41f..3d41034191f0 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -9,7 +9,39 @@
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
+#include "ql4_version.h"
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int in_count)
+{
+ int i;
+
+ /* Load all mailbox registers, except mailbox 0. */
+ for (i = 1; i < in_count; i++)
+ writel(mbx_cmd[i], &ha->reg->mailbox[i]);
+
+ /* Wakeup firmware */
+ writel(mbx_cmd[0], &ha->reg->mailbox[0]);
+ readl(&ha->reg->mailbox[0]);
+ writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+}
+
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+ int intr_status;
+
+ intr_status = readl(&ha->reg->ctrl_status);
+ if (intr_status & INTR_PENDING) {
+ /*
+ * Service the interrupt.
+ * The ISR will save the mailbox status registers
+ * to a temporary storage location in the adapter structure.
+ */
+ ha->mbox_status_count = out_count;
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+ }
+}
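
/*
 * The two helpers above are wired into the per-ISP ops table so that
 * qla4xxx_mailbox_command() no longer branches on the chip type. A
 * minimal sketch of that dispatch pattern, assuming hypothetical
 * stand-in types; the real struct isp_operations lives in ql4_def.h
 * and carries many more hooks.
 */
#include <stdint.h>
#include <stdio.h>

struct host;			/* stand-in for struct scsi_qla_host */

struct mbox_ops {
	void (*queue_mailbox_command)(struct host *ha, uint32_t *cmd,
				      int in_count);
	void (*process_mailbox_interrupt)(struct host *ha, int out_count);
};

struct host {
	const struct mbox_ops *isp_ops;
};

static void generic_queue(struct host *ha, uint32_t *cmd, int in_count)
{
	printf("queued %d mailbox words, cmd 0x%x\n", in_count, cmd[0]);
}

static void generic_process(struct host *ha, int out_count)
{
	printf("servicing mailbox interrupt, %d status words\n", out_count);
}

static const struct mbox_ops qla4xxx_ops = {
	.queue_mailbox_command	   = generic_queue,
	.process_mailbox_interrupt = generic_process,
};

int main(void)
{
	struct host ha = { .isp_ops = &qla4xxx_ops };
	uint32_t cmd[4] = { 0x56 /* hypothetical opcode */, 0, 0, 0 };

	/* Chip-specific behavior is chosen once, at ops-table setup. */
	ha.isp_ops->queue_mailbox_command(&ha, cmd, 4);
	ha.isp_ops->process_mailbox_interrupt(&ha, 4);
	return 0;
}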
/**
* qla4xxx_mailbox_command - issues mailbox commands
@@ -30,7 +62,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
int status = QLA_ERROR;
uint8_t i;
u_long wait_count;
- uint32_t intr_status;
unsigned long flags = 0;
uint32_t dev_state;
@@ -77,7 +108,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
- if (is_qla8022(ha)) {
+ if (is_qla80XX(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha,
"scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
@@ -85,10 +116,10 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
goto mbox_exit;
}
/* Do not send any mbx cmd if h/w is in failed state*/
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla4_8xxx_idc_unlock(ha);
- if (dev_state == QLA82XX_DEV_FAILED) {
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+ ha->isp_ops->idc_unlock(ha);
+ if (dev_state == QLA8XXX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha,
"scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
ha->host_no, __func__);
@@ -102,30 +133,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
for (i = 0; i < outCount; i++)
ha->mbox_status[i] = 0;
- if (is_qla8022(ha)) {
- /* Load all mailbox registers, except mailbox 0. */
- DEBUG5(
- printk("scsi%ld: %s: Cmd ", ha->host_no, __func__);
- for (i = 0; i < inCount; i++)
- printk("mb%d=%04x ", i, mbx_cmd[i]);
- printk("\n"));
-
- for (i = 1; i < inCount; i++)
- writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]);
- writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]);
- readl(&ha->qla4_8xxx_reg->mailbox_in[0]);
- writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint);
- } else {
- /* Load all mailbox registers, except mailbox 0. */
- for (i = 1; i < inCount; i++)
- writel(mbx_cmd[i], &ha->reg->mailbox[i]);
-
- /* Wakeup firmware */
- writel(mbx_cmd[0], &ha->reg->mailbox[0]);
- readl(&ha->reg->mailbox[0]);
- writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
- readl(&ha->reg->ctrl_status);
- }
+ /* Queue the mailbox command to the firmware */
+ ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -167,37 +176,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (is_qla8022(ha)) {
- intr_status =
- readl(&ha->qla4_8xxx_reg->host_int);
- if (intr_status & ISRX_82XX_RISC_INT) {
- ha->mbox_status_count = outCount;
- intr_status =
- readl(&ha->qla4_8xxx_reg->host_status);
- ha->isp_ops->interrupt_service_routine(
- ha, intr_status);
- if (test_bit(AF_INTERRUPTS_ON,
- &ha->flags) &&
- test_bit(AF_INTx_ENABLED,
- &ha->flags))
- qla4_8xxx_wr_32(ha,
- ha->nx_legacy_intr.tgt_mask_reg,
- 0xfbff);
- }
- } else {
- intr_status = readl(&ha->reg->ctrl_status);
- if (intr_status & INTR_PENDING) {
- /*
- * Service the interrupt.
- * The ISR will save the mailbox status
- * registers to a temporary storage
- * location in the adapter structure.
- */
- ha->mbox_status_count = outCount;
- ha->isp_ops->interrupt_service_routine(
- ha, intr_status);
- }
- }
+ ha->isp_ops->process_mailbox_interrupt(ha, outCount);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
msleep(10);
}
@@ -205,7 +184,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
/* Check for mailbox timeout. */
if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
- if (is_qla8022(ha) &&
+ if (is_qla80XX(ha) &&
test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: prematurely completing mbx cmd as "
@@ -222,9 +201,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
if (is_qla8022(ha)) {
ql4_printk(KERN_INFO, ha,
"disabling pause transmit on port 0 & 1.\n");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0 |
CRB_NIU_XG_PAUSE_CTL_P1);
+ } else if (is_qla8032(ha)) {
+ ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ qla4_83xx_disable_pause(ha);
}
goto mbox_exit;
}
@@ -373,7 +356,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
if (is_qla8022(ha))
- qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
+ qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);
mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
mbox_cmd[1] = 0;
@@ -566,7 +549,7 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
@@ -1695,7 +1678,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
sess = conn->session;
- dst_addr = &qla_conn->qla_ep->dst_addr;
+ dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
if (dst_addr->sa_family == AF_INET6)
options |= IPV6_DEFAULT_DDB_ENTRY;
@@ -1953,3 +1936,72 @@ int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
}
return status;
}
+
+/**
+ * qla4_8xxx_set_param - set driver version in firmware.
+ * @ha: Pointer to host adapter structure.
+ * @param: Parameter to set, i.e., the driver version
+ **/
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ uint32_t status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_SET_PARAM;
+ if (param == SET_DRVR_VERSION) {
+ mbox_cmd[1] = SET_DRVR_VERSION;
+ strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
+ MAX_DRVR_VER_LEN);
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
+ __func__, param);
+ status = QLA_ERROR;
+ goto exit_set_param;
+ }
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
+ mbox_sts);
+ if (status == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+ __func__, mbox_sts[0]);
+
+exit_set_param:
+ return status;
+}
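
/*
 * MBOX_CMD_SET_PARAM above carries the version string in the mailbox
 * array itself, byte-packed into the 32-bit registers starting at
 * mbox_cmd[2]. A self-contained sketch of that packing; the register
 * count, opcodes, and length limit here are assumptions chosen for the
 * example, not the driver's real values.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG_COUNT	8	/* assumed mailbox register count */
#define MAX_VER_LEN	((REG_COUNT - 2) * sizeof(uint32_t))

int main(void)
{
	uint32_t mbox_cmd[REG_COUNT];
	const char *version = "5.03.00-k1";	/* hypothetical version */

	memset(mbox_cmd, 0, sizeof(mbox_cmd));
	mbox_cmd[0] = 0x59;	/* hypothetical SET_PARAM opcode */
	mbox_cmd[1] = 1;	/* hypothetical SET_DRVR_VERSION selector */

	/* Copy the string into registers 2..N; memset guarantees a NUL. */
	strncpy((char *)&mbox_cmd[2], version, MAX_VER_LEN - 1);

	for (int i = 0; i < REG_COUNT; i++)
		printf("mb%d = 0x%08x\n", i, mbox_cmd[i]);
	return 0;
}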
+
+/**
+ * qla4_83xx_post_idc_ack - post IDC ACK
+ * @ha: Pointer to host adapter structure.
+ *
+ * Posts IDC ACK for IDC Request Notification AEN.
+ **/
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_IDC_ACK;
+ mbox_cmd[1] = ha->idc_info.request_desc;
+ mbox_cmd[2] = ha->idc_info.info1;
+ mbox_cmd[3] = ha->idc_info.info2;
+ mbox_cmd[4] = ha->idc_info.info3;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+ mbox_sts[0]);
+ else
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n",
+ __func__));
+
+ return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 7851f314ba96..325db1f2c091 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 945cc328f57f..dba0514d1c70 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 939d7261c37a..66f52d2b5cd4 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -10,6 +10,7 @@
#include <linux/ratelimit.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
+#include "ql4_inline.h"
#include <asm-generic/io-64-nonatomic-lo-hi.h>
@@ -27,7 +28,7 @@
#define CRB_BLK(off) ((off >> 20) & 0x3f)
#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
#define CRB_WINDOW_2M (0x130060)
-#define CRB_HI(off) ((qla4_8xxx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+#define CRB_HI(off) ((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
@@ -51,7 +52,7 @@ static int qla4_8xxx_crb_table_initialized;
(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
static void
-qla4_8xxx_crb_addr_transform_setup(void)
+qla4_82xx_crb_addr_transform_setup(void)
{
qla4_8xxx_crb_addr_transform(XDMA);
qla4_8xxx_crb_addr_transform(TIMR);
@@ -268,7 +269,7 @@ static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
/*
* top 12 bits of crb internal address (hub, agent)
*/
-static unsigned qla4_8xxx_crb_hub_agt[64] = {
+static unsigned qla4_82xx_crb_hub_agt[64] = {
0,
QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
@@ -353,7 +354,7 @@ static char *qdev_state[] = {
* side effect: lock crb window
*/
static void
-qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
+qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
{
u32 win_read;
@@ -373,96 +374,115 @@ qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
}
void
-qla4_8xxx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
+qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
{
unsigned long flags = 0;
int rv;
- rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off);
+ rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
BUG_ON(rv == -1);
if (rv == 1) {
write_lock_irqsave(&ha->hw_lock, flags);
- qla4_8xxx_crb_win_lock(ha);
- qla4_8xxx_pci_set_crbwindow_2M(ha, &off);
+ qla4_82xx_crb_win_lock(ha);
+ qla4_82xx_pci_set_crbwindow_2M(ha, &off);
}
writel(data, (void __iomem *)off);
if (rv == 1) {
- qla4_8xxx_crb_win_unlock(ha);
+ qla4_82xx_crb_win_unlock(ha);
write_unlock_irqrestore(&ha->hw_lock, flags);
}
}
-int
-qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
{
unsigned long flags = 0;
int rv;
u32 data;
- rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off);
+ rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
BUG_ON(rv == -1);
if (rv == 1) {
write_lock_irqsave(&ha->hw_lock, flags);
- qla4_8xxx_crb_win_lock(ha);
- qla4_8xxx_pci_set_crbwindow_2M(ha, &off);
+ qla4_82xx_crb_win_lock(ha);
+ qla4_82xx_pci_set_crbwindow_2M(ha, &off);
}
data = readl((void __iomem *)off);
if (rv == 1) {
- qla4_8xxx_crb_win_unlock(ha);
+ qla4_82xx_crb_win_unlock(ha);
write_unlock_irqrestore(&ha->hw_lock, flags);
}
return data;
}
/* Minidump related functions */
-static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
- u32 data, uint8_t flag)
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
{
- uint32_t win_read, off_value, rval = QLA_SUCCESS;
+ uint32_t win_read, off_value;
+ int rval = QLA_SUCCESS;
off_value = off & 0xFFFF0000;
writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
- /* Read back value to make sure write has gone through before trying
+ /*
+ * Read back value to make sure write has gone through before trying
* to use it.
*/
win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
if (win_read != off_value) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
- __func__, off_value, win_read, off));
- return QLA_ERROR;
+ __func__, off_value, win_read, off));
+ rval = QLA_ERROR;
+ } else {
+ off_value = off & 0x0000FFFF;
+ *data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
}
+ return rval;
+}
- off_value = off & 0x0000FFFF;
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
+{
+ uint32_t win_read, off_value;
+ int rval = QLA_SUCCESS;
+
+ off_value = off & 0xFFFF0000;
+ writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
- if (flag)
+	/*
+	 * Read back value to make sure write has gone through before trying
+	 * to use it.
+	 */
+ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != off_value) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+ __func__, off_value, win_read, off));
+ rval = QLA_ERROR;
+ } else {
+ off_value = off & 0x0000FFFF;
writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
ha->nx_pcibase));
- else
- rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
- ha->nx_pcibase));
-
+ }
return rval;
}
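
/*
 * Both minidump accessors above follow the same posted-write discipline:
 * program the 2M CRB window with the upper 16 address bits, read the
 * window register back to flush and verify the write, then touch the data
 * through the indirect aperture. A standalone sketch with a simulated
 * window register; readl()/writel() and the aperture offsets are
 * stand-ins, not the hardware map.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t crb_window;		/* simulated CRB_WINDOW_2M register */

static void window_write(uint32_t v)	{ crb_window = v; }
static uint32_t window_read(void)	{ return crb_window; }

/* Returns 0 on success, -1 if the window did not take the new value. */
static int set_crb_window(uint32_t off)
{
	uint32_t off_value = off & 0xFFFF0000;

	window_write(off_value);
	/* Read back to make sure the posted write has landed. */
	if (window_read() != off_value) {
		fprintf(stderr, "window write lost (wanted 0x%x)\n",
			off_value);
		return -1;
	}
	return 0;
}

int main(void)
{
	uint32_t off = 0x12345678;

	if (set_crb_window(off) == 0)
		printf("access data at indirect offset 0x%x\n",
		       off & 0x0000FFFF);
	return 0;
}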
#define CRB_WIN_LOCK_TIMEOUT 100000000
-int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
{
int i;
int done = 0, timeout = 0;
while (!done) {
/* acquire semaphore3 from PCI HW block */
- done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
if (done == 1)
break;
if (timeout >= CRB_WIN_LOCK_TIMEOUT)
@@ -478,32 +498,32 @@ int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
cpu_relax();	/* This is a nop instruction on i386 */
}
}
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
return 0;
}
-void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *ha)
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
{
- qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
}
#define IDC_LOCK_TIMEOUT 100000000
/**
- * qla4_8xxx_idc_lock - hw_lock
+ * qla4_82xx_idc_lock - hw_lock
* @ha: pointer to adapter structure
*
* General purpose lock used to synchronize access to
* CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
**/
-int qla4_8xxx_idc_lock(struct scsi_qla_host *ha)
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
{
int i;
int done = 0, timeout = 0;
while (!done) {
/* acquire semaphore5 from PCI HW block */
- done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
if (done == 1)
break;
if (timeout >= IDC_LOCK_TIMEOUT)
@@ -522,13 +542,13 @@ int qla4_8xxx_idc_lock(struct scsi_qla_host *ha)
return 0;
}
-void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha)
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
{
- qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}
int
-qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
+qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
{
struct crb_128M_2M_sub_block_map *m;
@@ -562,44 +582,40 @@ qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
return 1;
}
-/* PCI Windowing for DDR regions. */
-#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
- (((addr) <= (high)) && ((addr) >= (low)))
-
/*
* check memory access boundary.
* used by test agent. support ddr access only for now
*/
static unsigned long
-qla4_8xxx_pci_mem_bound_check(struct scsi_qla_host *ha,
+qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
unsigned long long addr, int size)
{
- if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
- QLA82XX_ADDR_DDR_NET_MAX) ||
- !QLA82XX_ADDR_IN_RANGE(addr + size - 1,
- QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) ||
+ if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX) ||
+ !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
+ QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
return 0;
}
return 1;
}
-static int qla4_8xxx_pci_set_window_warning_count;
+static int qla4_82xx_pci_set_window_warning_count;
static unsigned long
-qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
+qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
{
int window;
u32 win_read;
- if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
- QLA82XX_ADDR_DDR_NET_MAX)) {
+ if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX)) {
/* DDR network side */
window = MN_WIN(addr);
ha->ddr_mn_window = window;
- qla4_8xxx_wr_32(ha, ha->mn_win_crb |
+ qla4_82xx_wr_32(ha, ha->mn_win_crb |
QLA82XX_PCI_CRBSPACE, window);
- win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb |
+ win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
QLA82XX_PCI_CRBSPACE);
if ((win_read << 17) != window) {
ql4_printk(KERN_WARNING, ha,
@@ -607,8 +623,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
__func__, window, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
- } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
- QLA82XX_ADDR_OCM0_MAX)) {
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+ QLA8XXX_ADDR_OCM0_MAX)) {
unsigned int temp1;
/* if bits 19:18&17:11 are on */
if ((addr & 0x00ff800) == 0xff800) {
@@ -618,9 +634,9 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
window = OCM_WIN(addr);
ha->ddr_mn_window = window;
- qla4_8xxx_wr_32(ha, ha->mn_win_crb |
+ qla4_82xx_wr_32(ha, ha->mn_win_crb |
QLA82XX_PCI_CRBSPACE, window);
- win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb |
+ win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
QLA82XX_PCI_CRBSPACE);
temp1 = ((window & 0x1FF) << 7) |
((window & 0x0FFFE0000) >> 17);
@@ -630,14 +646,14 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
- } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
QLA82XX_P3_ADDR_QDR_NET_MAX)) {
/* QDR network side */
window = MS_WIN(addr);
ha->qdr_sn_window = window;
- qla4_8xxx_wr_32(ha, ha->ms_win_crb |
+ qla4_82xx_wr_32(ha, ha->ms_win_crb |
QLA82XX_PCI_CRBSPACE, window);
- win_read = qla4_8xxx_rd_32(ha,
+ win_read = qla4_82xx_rd_32(ha,
ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
if (win_read != window) {
printk("%s: Written MSwin (0x%x) != Read "
@@ -650,8 +666,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
* peg gdb frequently accesses memory that doesn't exist,
* this limits the chit chat so debugging isn't slowed down.
*/
- if ((qla4_8xxx_pci_set_window_warning_count++ < 8) ||
- (qla4_8xxx_pci_set_window_warning_count%64 == 0)) {
+ if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
+ (qla4_82xx_pci_set_window_warning_count%64 == 0)) {
printk("%s: Warning:%s Unknown address range!\n",
__func__, DRIVER_NAME);
}
@@ -661,7 +677,7 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
}
/* check if address is in the same windows as the previous access */
-static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
+static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
unsigned long long addr)
{
int window;
@@ -669,20 +685,20 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
- if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
- QLA82XX_ADDR_DDR_NET_MAX)) {
+ if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX)) {
/* DDR network side */
BUG(); /* MN access can not come here */
- } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
- QLA82XX_ADDR_OCM0_MAX)) {
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+ QLA8XXX_ADDR_OCM0_MAX)) {
return 1;
- } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
- QLA82XX_ADDR_OCM1_MAX)) {
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
+ QLA8XXX_ADDR_OCM1_MAX)) {
return 1;
- } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
qdr_max)) {
/* QDR network side */
- window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
+ window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
if (ha->qdr_sn_window == window)
return 1;
}
@@ -690,7 +706,7 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
return 0;
}
-static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
+static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
u64 off, void *data, int size)
{
unsigned long flags;
@@ -707,9 +723,9 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
* If attempting to access unknown address or straddle hw windows,
* do not access.
*/
- start = qla4_8xxx_pci_set_window(ha, off);
+ start = qla4_82xx_pci_set_window(ha, off);
if ((start == -1UL) ||
- (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
printk(KERN_ERR"%s out of bound pci memory access. "
"offset is 0x%llx\n", DRIVER_NAME, off);
@@ -763,7 +779,7 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
}
static int
-qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
+qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
void *data, int size)
{
unsigned long flags;
@@ -780,9 +796,9 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
* If attempting to access unknown address or straddle hw windows,
* do not access.
*/
- start = qla4_8xxx_pci_set_window(ha, off);
+ start = qla4_82xx_pci_set_window(ha, off);
if ((start == -1UL) ||
- (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
printk(KERN_ERR"%s out of bound pci memory access. "
"offset is 0x%llx\n", DRIVER_NAME, off);
@@ -835,13 +851,13 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
#define MTU_FUDGE_FACTOR 100
static unsigned long
-qla4_8xxx_decode_crb_addr(unsigned long addr)
+qla4_82xx_decode_crb_addr(unsigned long addr)
{
int i;
unsigned long base_addr, offset, pci_base;
if (!qla4_8xxx_crb_table_initialized)
- qla4_8xxx_crb_addr_transform_setup();
+ qla4_82xx_crb_addr_transform_setup();
pci_base = ADDR_ERROR;
base_addr = addr & 0xfff00000;
@@ -860,10 +876,10 @@ qla4_8xxx_decode_crb_addr(unsigned long addr)
}
static long rom_max_timeout = 100;
-static long qla4_8xxx_rom_lock_timeout = 100;
+static long qla4_82xx_rom_lock_timeout = 100;
static int
-qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
+qla4_82xx_rom_lock(struct scsi_qla_host *ha)
{
int i;
int done = 0, timeout = 0;
@@ -871,10 +887,10 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
while (!done) {
/* acquire semaphore2 from PCI HW block */
- done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
- if (timeout >= qla4_8xxx_rom_lock_timeout)
+ if (timeout >= qla4_82xx_rom_lock_timeout)
return -1;
timeout++;
@@ -887,24 +903,24 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
cpu_relax();	/* This is a nop instruction on i386 */
}
}
- qla4_8xxx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
return 0;
}
static void
-qla4_8xxx_rom_unlock(struct scsi_qla_host *ha)
+qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
{
- qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}
static int
-qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha)
+qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
{
long timeout = 0;
long done = 0 ;
while (done == 0) {
- done = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
done &= 2;
timeout++;
if (timeout >= rom_max_timeout) {
@@ -917,40 +933,41 @@ qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha)
}
static int
-qla4_8xxx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
{
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
- if (qla4_8xxx_wait_rom_done(ha)) {
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qla4_82xx_wait_rom_done(ha)) {
printk("%s: Error waiting for rom done\n", DRIVER_NAME);
return -1;
}
/* reset abyte_cnt and dummy_byte_cnt */
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
udelay(10);
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
- *valp = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ *valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
return 0;
}
static int
-qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
{
int ret, loops = 0;
- while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) {
+ while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
loops++;
}
if (loops >= 50000) {
- printk("%s: qla4_8xxx_rom_lock failed\n", DRIVER_NAME);
+ ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
+ DRIVER_NAME);
return -1;
}
- ret = qla4_8xxx_do_rom_fast_read(ha, addr, valp);
- qla4_8xxx_rom_unlock(ha);
+ ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
+ qla4_82xx_rom_unlock(ha);
return ret;
}
@@ -959,7 +976,7 @@ qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
* to put the ISP into operational state
**/
static int
-qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
+qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
{
int addr, val;
int i ;
@@ -973,68 +990,68 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
};
/* Halt all the individual PEGs and other blocks of the ISP */
- qla4_8xxx_rom_lock(ha);
+ qla4_82xx_rom_lock(ha);
/* disable all I2Q */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
/* disable all niu interrupts */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
/* disable xge rx/tx */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
/* disable xg1 rx/tx */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
/* disable sideband mac */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
/* disable ap0 mac */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
/* disable ap1 mac */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
- val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+ val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
/* halt epg */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
/* halt timers */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
/* halt pegs */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
msleep(5);
/* big hammer */
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
/* don't reset CAM block on reset */
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
else
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
- qla4_8xxx_rom_unlock(ha);
+ qla4_82xx_rom_unlock(ha);
/* Read the signature value from the flash.
 * Offset 0: Contains the signature (0xcafecafe)
 * Offset 4: Offset and number of addr/value pairs
 * present in the CRB initialization sequence
 */
- if (qla4_8xxx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
- qla4_8xxx_rom_fast_read(ha, 4, &n) != 0) {
+ if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+ qla4_82xx_rom_fast_read(ha, 4, &n) != 0) {
ql4_printk(KERN_WARNING, ha,
"[ERROR] Reading crb_init area: n: %08x\n", n);
return -1;
@@ -1065,8 +1082,8 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
}
for (i = 0; i < n; i++) {
- if (qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
- qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
+ if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+ qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
0) {
kfree(buf);
return -1;
@@ -1080,7 +1097,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
/* Translate internal CRB initialization
* address to PCI bus address
*/
- off = qla4_8xxx_decode_crb_addr((unsigned long)buf[i].addr) +
+ off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
QLA82XX_PCI_CRBSPACE;
/* Not all CRB addr/value pair to be written,
* some of them are skipped
@@ -1125,7 +1142,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
continue;
}
- qla4_8xxx_wr_32(ha, off, buf[i].data);
+ qla4_82xx_wr_32(ha, off, buf[i].data);
/* ISP requires much bigger delay to settle down,
* else crb_window returns 0xffffffff
@@ -1142,25 +1159,25 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
kfree(buf);
/* Resetting the data and instruction cache */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
/* Clear all protocol processing engines */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
return 0;
}
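
/*
 * The CRB init read above starts with a small header: word 0 must be the
 * 0xcafecafe signature and word 1 encodes where the addr/value pairs live
 * and how many there are. A sketch of that sanity check against an
 * in-memory image; the field split in word 1 is an assumption made for
 * illustration, not taken from the datasheet.
 */
#include <stdint.h>
#include <stdio.h>

#define CRB_INIT_SIGNATURE 0xcafecafeUL

int main(void)
{
	/* Simulated first words of the flash CRB init area. */
	uint32_t flash[2] = { 0xcafecafe, 0x00040010 };

	if (flash[0] != CRB_INIT_SIGNATURE) {
		fprintf(stderr, "bad CRB init signature: 0x%08x\n", flash[0]);
		return 1;
	}
	/* Assumed split: low 16 bits = pair count, high 16 bits = offset. */
	uint32_t n = flash[1] & 0xFFFF;
	uint32_t offset = flash[1] >> 16;

	printf("CRB init: %u addr/value pairs at word offset %u\n",
	       n, offset);
	return 0;
}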
static int
-qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
+qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
{
int i, rval = 0;
long size = 0;
@@ -1175,14 +1192,14 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
ha->host_no, __func__, flashaddr, image_start));
for (i = 0; i < size; i++) {
- if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
- (qla4_8xxx_rom_fast_read(ha, flashaddr + 4,
+ if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+ (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
(int *)&high))) {
rval = -1;
goto exit_load_from_flash;
}
data = ((u64)high << 32) | low ;
- rval = qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8);
+ rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
if (rval)
goto exit_load_from_flash;
@@ -1197,20 +1214,20 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
udelay(100);
read_lock(&ha->hw_lock);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
read_unlock(&ha->hw_lock);
exit_load_from_flash:
return rval;
}
-static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
+static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
{
u32 rst;
- qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0);
- if (qla4_8xxx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
+ qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
printk(KERN_WARNING "%s: Error during CRB Initialization\n",
__func__);
return QLA_ERROR;
@@ -1223,12 +1240,12 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
* To get around this, QM is brought out of reset.
*/
- rst = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+ rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
/* unreset qm */
rst &= ~(1 << 28);
- qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
- if (qla4_8xxx_load_from_flash(ha, image_start)) {
+ if (qla4_82xx_load_from_flash(ha, image_start)) {
printk("%s: Error trying to load fw from flash!\n", __func__);
return QLA_ERROR;
}
@@ -1237,7 +1254,7 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
}
int
-qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
+qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
u64 off, void *data, int size)
{
int i, j = 0, k, start, end, loop, sz[2], off0[2];
@@ -1249,12 +1266,12 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
* If not MN, go check for MS or invalid.
*/
- if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
mem_crb = QLA82XX_CRB_QDR_NET;
else {
mem_crb = QLA82XX_CRB_DDR_NET;
- if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0)
- return qla4_8xxx_pci_mem_read_direct(ha,
+ if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla4_82xx_pci_mem_read_direct(ha,
off, data, size);
}
@@ -1270,16 +1287,16 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
for (i = 0; i < loop; i++) {
temp = off8 + (i << shift_amount);
- qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
temp = 0;
- qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
temp = MIU_TA_CTL_ENABLE;
- qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
- temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
- qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START_ENABLE;
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
if ((temp & MIU_TA_CTL_BUSY) == 0)
break;
}
@@ -1294,7 +1311,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
start = off0[i] >> 2;
end = (off0[i] + sz[i] - 1) >> 2;
for (k = start; k <= end; k++) {
- temp = qla4_8xxx_rd_32(ha,
+ temp = qla4_82xx_rd_32(ha,
mem_crb + MIU_TEST_AGT_RDDATA(k));
word[i] |= ((uint64_t)temp << (32 * (k & 1)));
}
@@ -1328,7 +1345,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
}
int
-qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
+qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
u64 off, void *data, int size)
{
int i, j, ret = 0, loop, sz[2], off0;
@@ -1339,12 +1356,12 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
/*
* If not MN, go check for MS or invalid.
*/
- if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
mem_crb = QLA82XX_CRB_QDR_NET;
else {
mem_crb = QLA82XX_CRB_DDR_NET;
- if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0)
- return qla4_8xxx_pci_mem_write_direct(ha,
+ if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla4_82xx_pci_mem_write_direct(ha,
off, data, size);
}
@@ -1359,7 +1376,7 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
startword = (off & 0xf)/8;
for (i = 0; i < loop; i++) {
- if (qla4_8xxx_pci_mem_read_2M(ha, off8 +
+ if (qla4_82xx_pci_mem_read_2M(ha, off8 +
(i << shift_amount), &word[i * scale], 8))
return -1;
}
@@ -1395,27 +1412,27 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
for (i = 0; i < loop; i++) {
temp = off8 + (i << shift_amount);
- qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
temp = 0;
- qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
temp = word[i * scale] & 0xffffffff;
- qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
temp = (word[i * scale] >> 32) & 0xffffffff;
- qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
temp = word[i*scale + 1] & 0xffffffff;
- qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
temp);
temp = (word[i*scale + 1] >> 32) & 0xffffffff;
- qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
temp);
- temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
- qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
- temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
- qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_WRITE_ENABLE;
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_WRITE_START;
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
if ((temp & MIU_TA_CTL_BUSY) == 0)
break;
}
@@ -1433,14 +1450,14 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
return ret;
}
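
/*
 * The 2M memory accessors above drive the MIU test agent with the same
 * rhythm: program the address registers, pulse ENABLE then START, and
 * poll the control register until BUSY drops, giving up after
 * MAX_CTL_CHECK reads. A standalone sketch of that poll loop; the busy
 * bit position and the register model are simulated assumptions, not the
 * hardware definition.
 */
#include <stdint.h>
#include <stdio.h>

#define MIU_TA_CTL_BUSY	(1u << 3)	/* assumed bit position */
#define MAX_CTL_CHECK	1000

static int busy_reads = 3;		/* simulate three busy polls */

static uint32_t read_agt_ctrl(void)
{
	return busy_reads-- > 0 ? MIU_TA_CTL_BUSY : 0;
}

int main(void)
{
	uint32_t temp;
	int j;

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = read_agt_ctrl();
		if ((temp & MIU_TA_CTL_BUSY) == 0)
			break;
	}
	if (j >= MAX_CTL_CHECK) {
		fprintf(stderr, "MIU test agent timed out\n");
		return 1;
	}
	printf("agent idle after %d polls; data registers are valid\n", j);
	return 0;
}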
-static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
+static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
{
u32 val = 0;
int retries = 60;
if (!pegtune_val) {
do {
- val = qla4_8xxx_rd_32(ha, CRB_CMDPEG_STATE);
+ val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
if ((val == PHAN_INITIALIZE_COMPLETE) ||
(val == PHAN_INITIALIZE_ACK))
return 0;
@@ -1450,7 +1467,7 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
} while (--retries);
if (!retries) {
- pegtune_val = qla4_8xxx_rd_32(ha,
+ pegtune_val = qla4_82xx_rd_32(ha,
QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
printk(KERN_WARNING "%s: init failed, "
"pegtune_val = %x\n", __func__, pegtune_val);
@@ -1460,21 +1477,21 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
return 0;
}
-static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
+static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
{
uint32_t state = 0;
int loops = 0;
/* Window 1 call */
read_lock(&ha->hw_lock);
- state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE);
+ state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
read_unlock(&ha->hw_lock);
while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
udelay(100);
/* Window 1 call */
read_lock(&ha->hw_lock);
- state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE);
+ state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
read_unlock(&ha->hw_lock);
loops++;
@@ -1494,11 +1511,21 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
{
uint32_t drv_active;
- drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- drv_active |= (1 << (ha->func_num * 4));
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+	/*
+	 * For ISP8324, the drv_active register has one bit per function;
+	 * shift 1 by func_num to set the function's bit.
+	 * For ISP8022, drv_active has four bits per function.
+	 */
+ if (is_qla8032(ha))
+ drv_active |= (1 << ha->func_num);
+ else
+ drv_active |= (1 << (ha->func_num * 4));
+
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
__func__, ha->host_no, drv_active);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
}
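
/*
 * The comment above describes two register layouts: ISP8324 gives each
 * PCI function one bit in drv_active, while ISP8022 gives each function
 * a four-bit field. A self-contained sketch of the resulting mask
 * arithmetic; the function number is an arbitrary example value.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t drv_active = 0;
	int func_num = 2;

	/* ISP8324 style: one bit per function. */
	drv_active |= (1u << func_num);
	printf("8324 layout, func %d: drv_active = 0x%08x\n",
	       func_num, drv_active);

	/* ISP8022 style: bit 0 of the function's 4-bit field. */
	drv_active = 0;
	drv_active |= (1u << (func_num * 4));
	printf("8022 layout, func %d: drv_active = 0x%08x\n",
	       func_num, drv_active);

	/* Clearing uses the complementary mask. */
	drv_active &= ~(1u << (func_num * 4));
	printf("after clear: drv_active = 0x%08x\n", drv_active);
	return 0;
}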
void
@@ -1506,50 +1533,87 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
{
uint32_t drv_active;
- drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- drv_active &= ~(1 << (ha->func_num * 4));
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+	/*
+	 * For ISP8324, the drv_active register has one bit per function;
+	 * shift 1 by func_num to clear the function's bit.
+	 * For ISP8022, drv_active has four bits per function.
+	 */
+ if (is_qla8032(ha))
+ drv_active &= ~(1 << (ha->func_num));
+ else
+ drv_active &= ~(1 << (ha->func_num * 4));
+
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
__func__, ha->host_no, drv_active);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
}
-static inline int
-qla4_8xxx_need_reset(struct scsi_qla_host *ha)
+inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
{
uint32_t drv_state, drv_active;
int rval;
- drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
- rval = drv_state & (1 << (ha->func_num * 4));
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324, the drv_state register has one bit per function;
+	 * test the function's bit with (1 << func_num).
+	 * For ISP8022, drv_state has four bits per function.
+	 */
+ if (is_qla8032(ha))
+ rval = drv_state & (1 << ha->func_num);
+ else
+ rval = drv_state & (1 << (ha->func_num * 4));
+
if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
rval = 1;
return rval;
}
-static inline void
-qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
{
uint32_t drv_state;
- drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
- drv_state |= (1 << (ha->func_num * 4));
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324, the drv_state register has one bit per function;
+	 * shift 1 by func_num to set the function's reset-ready bit.
+	 * For ISP8022, drv_state has four bits per function.
+	 */
+ if (is_qla8032(ha))
+ drv_state |= (1 << ha->func_num);
+ else
+ drv_state |= (1 << (ha->func_num * 4));
+
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
__func__, ha->host_no, drv_state);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
}
-static inline void
-qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
{
uint32_t drv_state;
- drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
- drv_state &= ~(1 << (ha->func_num * 4));
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324, the drv_state register has one bit per function;
+	 * shift 1 by func_num to clear the function's reset-ready bit.
+	 * For ISP8022, drv_state has four bits per function.
+	 */
+ if (is_qla8032(ha))
+ drv_state &= ~(1 << ha->func_num);
+ else
+ drv_state &= ~(1 << (ha->func_num * 4));
+
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
__func__, ha->host_no, drv_state);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
}
static inline void
@@ -1557,34 +1621,44 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
{
uint32_t qsnt_state;
- qsnt_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
- qsnt_state |= (2 << (ha->func_num * 4));
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+ qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324, the drv_state register has one bit per function;
+	 * shift 1 by func_num to set the function's quiescent bit.
+	 * For ISP8022, each function has a four-bit field and the
+	 * quiescent bit is bit 1 of that field, hence (2 << (func_num * 4)).
+	 */
+ if (is_qla8032(ha))
+ qsnt_state |= (1 << ha->func_num);
+ else
+ qsnt_state |= (2 << (ha->func_num * 4));
+
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
}
static int
-qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
+qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
{
int pcie_cap;
uint16_t lnk;
/* scrub dma mask expansion register */
- qla4_8xxx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
+ qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
/* Overwrite stale initialization register values */
- qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0);
- qla4_8xxx_wr_32(ha, CRB_RCVPEG_STATE, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
- qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+ qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
- if (qla4_8xxx_load_fw(ha, image_start) != QLA_SUCCESS) {
+ if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
printk("%s: Error trying to start fw!\n", __func__);
return QLA_ERROR;
}
/* Handshake with the card before we register the devices. */
- if (qla4_8xxx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
+ if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
printk("%s: Error during card handshake!\n", __func__);
return QLA_ERROR;
}
@@ -1595,11 +1669,10 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
ha->link_width = (lnk >> 4) & 0x3f;
/* Synchronize with Receive peg */
- return qla4_8xxx_rcvpeg_ready(ha);
+ return qla4_82xx_rcvpeg_ready(ha);
}
-static int
-qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
{
int rval = QLA_ERROR;
@@ -1617,7 +1690,7 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha,
"FW: Attempting to load firmware from flash...\n");
- rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
+ rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
@@ -1628,9 +1701,9 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
return rval;
}
-static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
- if (qla4_8xxx_rom_lock(ha)) {
+ if (qla4_82xx_rom_lock(ha)) {
/* Someone else is holding the lock. */
dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
}
@@ -1640,25 +1713,25 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
* else died while holding it.
* In either case, unlock.
*/
- qla4_8xxx_rom_unlock(ha);
+ qla4_82xx_rom_unlock(ha);
}
static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
- struct qla82xx_minidump_entry_crb *crb_hdr;
+ struct qla8xxx_minidump_entry_crb *crb_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
- crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+ crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
r_addr = crb_hdr->addr;
r_stride = crb_hdr->crb_strd.addr_stride;
loop_cnt = crb_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
*data_ptr++ = cpu_to_le32(r_addr);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
@@ -1667,19 +1740,19 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
}
static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
unsigned long p_wait, w_time, p_mask;
uint32_t c_value_w, c_value_r;
- struct qla82xx_minidump_entry_cache *cache_hdr;
+ struct qla8xxx_minidump_entry_cache *cache_hdr;
int rval = QLA_ERROR;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
- cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+ cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
r_addr = cache_hdr->read_addr;
@@ -1693,16 +1766,16 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
p_mask = cache_hdr->cache_ctrl.poll_mask;
for (i = 0; i < loop_count; i++) {
- qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+ ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
if (c_value_w)
- qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+ ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
if (p_mask) {
w_time = jiffies + p_wait;
do {
- c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
- 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, c_addr,
+ &c_value_r);
if ((c_value_r & p_mask) == 0) {
break;
} else if (time_after_eq(jiffies, w_time)) {
@@ -1714,7 +1787,7 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
addr = r_addr;
for (k = 0; k < r_cnt; k++) {
- r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
*data_ptr++ = cpu_to_le32(r_value);
addr += cache_hdr->read_ctrl.read_addr_stride;
}
@@ -1726,9 +1799,9 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
}
static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr)
+ struct qla8xxx_minidump_entry_hdr *entry_hdr)
{
- struct qla82xx_minidump_entry_crb *crb_entry;
+ struct qla8xxx_minidump_entry_crb *crb_entry;
uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
uint32_t crb_addr;
unsigned long wtime;
@@ -1738,58 +1811,59 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
ha->fw_dump_tmplt_hdr;
- crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+ crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
crb_addr = crb_entry->addr;
for (i = 0; i < crb_entry->op_count; i++) {
opcode = crb_entry->crb_ctrl.opcode;
- if (opcode & QLA82XX_DBG_OPCODE_WR) {
- qla4_8xxx_md_rw_32(ha, crb_addr,
- crb_entry->value_1, 1);
- opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ if (opcode & QLA8XXX_DBG_OPCODE_WR) {
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr,
+ crb_entry->value_1);
+ opcode &= ~QLA8XXX_DBG_OPCODE_WR;
}
- if (opcode & QLA82XX_DBG_OPCODE_RW) {
- read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
- qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
- opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ if (opcode & QLA8XXX_DBG_OPCODE_RW) {
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+ opcode &= ~QLA8XXX_DBG_OPCODE_RW;
}
- if (opcode & QLA82XX_DBG_OPCODE_AND) {
- read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ if (opcode & QLA8XXX_DBG_OPCODE_AND) {
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
read_value &= crb_entry->value_2;
- opcode &= ~QLA82XX_DBG_OPCODE_AND;
- if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ opcode &= ~QLA8XXX_DBG_OPCODE_AND;
+ if (opcode & QLA8XXX_DBG_OPCODE_OR) {
read_value |= crb_entry->value_3;
- opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ opcode &= ~QLA8XXX_DBG_OPCODE_OR;
}
- qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
}
- if (opcode & QLA82XX_DBG_OPCODE_OR) {
- read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ if (opcode & QLA8XXX_DBG_OPCODE_OR) {
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
read_value |= crb_entry->value_3;
- qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
- opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+ opcode &= ~QLA8XXX_DBG_OPCODE_OR;
}
- if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
poll_time = crb_entry->crb_strd.poll_timeout;
wtime = jiffies + poll_time;
- read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
do {
if ((read_value & crb_entry->value_2) ==
- crb_entry->value_1)
+ crb_entry->value_1) {
break;
- else if (time_after_eq(jiffies, wtime)) {
+ } else if (time_after_eq(jiffies, wtime)) {
/* capturing dump failed */
rval = QLA_ERROR;
break;
- } else
- read_value = qla4_8xxx_md_rw_32(ha,
- crb_addr, 0, 0);
+ } else {
+ ha->isp_ops->rd_reg_indirect(ha,
+ crb_addr, &read_value);
+ }
} while (1);
- opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
}
- if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
if (crb_entry->crb_strd.state_index_a) {
index = crb_entry->crb_strd.state_index_a;
addr = tmplt_hdr->saved_state_array[index];
@@ -1797,13 +1871,13 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
addr = crb_addr;
}
- read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
index = crb_entry->crb_ctrl.state_index_v;
tmplt_hdr->saved_state_array[index] = read_value;
- opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
}
- if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
if (crb_entry->crb_strd.state_index_a) {
index = crb_entry->crb_strd.state_index_a;
addr = tmplt_hdr->saved_state_array[index];
@@ -1819,11 +1893,11 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
read_value = crb_entry->value_1;
}
- qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
- opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
+ opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
}
- if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
index = crb_entry->crb_ctrl.state_index_v;
read_value = tmplt_hdr->saved_state_array[index];
read_value <<= crb_entry->crb_ctrl.shl;
@@ -1833,7 +1907,7 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
read_value |= crb_entry->value_3;
read_value += crb_entry->value_1;
tmplt_hdr->saved_state_array[index] = read_value;
- opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
}
crb_addr += crb_entry->crb_strd.addr_stride;
}
@@ -1842,15 +1916,15 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
}
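
The control-entry loop treats crb_ctrl.opcode as a bit field: each QLA8XXX_DBG_OPCODE_* flag is tested, handled, and then cleared, so one template entry can combine several operations (note how OR can ride along inside the AND branch before the write-back). A small standalone sketch of that dispatch, with illustrative flag values:

    #include <stdint.h>
    #include <stdio.h>

    #define OP_WR   0x01
    #define OP_AND  0x04
    #define OP_OR   0x08

    int main(void)
    {
        uint32_t reg = 0xffff;          /* stands in for the CRB register */
        uint32_t opcode = OP_AND | OP_OR;
        uint32_t value_2 = 0x00f0, value_3 = 0x0005;

        if (opcode & OP_WR) {
            reg = 0;                    /* plain write */
            opcode &= ~OP_WR;
        }
        if (opcode & OP_AND) {
            uint32_t v = reg & value_2;
            opcode &= ~OP_AND;
            if (opcode & OP_OR) {       /* OR combined with AND */
                v |= value_3;
                opcode &= ~OP_OR;
            }
            reg = v;                    /* single write-back */
        }
        printf("reg = 0x%x\n", reg);    /* prints 0xf5 */
        return 0;
    }
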
static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
- struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+ struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
- ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+ ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
r_addr = ocm_hdr->read_addr;
r_stride = ocm_hdr->read_addr_stride;
loop_cnt = ocm_hdr->op_count;
@@ -1865,20 +1939,20 @@ static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
r_addr += r_stride;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
- __func__, (loop_cnt * sizeof(uint32_t))));
+ __func__, (unsigned long)(loop_cnt * sizeof(uint32_t))));
*d_ptr = data_ptr;
}
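
Each process_* helper above takes uint32_t **d_ptr, appends little-endian words through a local cursor, and writes the advanced cursor back on exit, so the caller's position in the dump buffer survives across helpers. A minimal sketch of that cursor convention:

    #include <stdint.h>
    #include <stdio.h>

    /* Append n words through the caller's cursor, then advance it. */
    static void emit_words(uint32_t **d_ptr, uint32_t base, int n)
    {
        uint32_t *p = *d_ptr;
        int i;

        for (i = 0; i < n; i++)
            *p++ = base + i;    /* the driver would cpu_to_le32() here */
        *d_ptr = p;
    }

    int main(void)
    {
        uint32_t buf[8], *cur = buf;

        emit_words(&cur, 100, 3);
        emit_words(&cur, 200, 2);
        printf("%ld words collected\n", (long)(cur - buf));     /* 5 */
        return 0;
    }
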
static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
- struct qla82xx_minidump_entry_mux *mux_hdr;
+ struct qla8xxx_minidump_entry_mux *mux_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
- mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+ mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
r_addr = mux_hdr->read_addr;
s_addr = mux_hdr->select_addr;
s_stride = mux_hdr->select_value_stride;
@@ -1886,8 +1960,8 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
loop_cnt = mux_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
- r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
*data_ptr++ = cpu_to_le32(s_value);
*data_ptr++ = cpu_to_le32(r_value);
s_value += s_stride;
@@ -1896,16 +1970,16 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
}
static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
uint32_t c_value_w;
- struct qla82xx_minidump_entry_cache *cache_hdr;
+ struct qla8xxx_minidump_entry_cache *cache_hdr;
uint32_t *data_ptr = *d_ptr;
- cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+ cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
r_addr = cache_hdr->read_addr;
c_addr = cache_hdr->control_addr;
@@ -1916,11 +1990,11 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
for (i = 0; i < loop_count; i++) {
- qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
- qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+ ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
+ ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
addr = r_addr;
for (k = 0; k < r_cnt; k++) {
- r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
*data_ptr++ = cpu_to_le32(r_value);
addr += cache_hdr->read_ctrl.read_addr_stride;
}
@@ -1930,27 +2004,27 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
}
static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t s_addr, r_addr;
uint32_t r_stride, r_value, r_cnt, qid = 0;
uint32_t i, k, loop_cnt;
- struct qla82xx_minidump_entry_queue *q_hdr;
+ struct qla8xxx_minidump_entry_queue *q_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
- q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+ q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
s_addr = q_hdr->select_addr;
r_cnt = q_hdr->rd_strd.read_addr_cnt;
r_stride = q_hdr->rd_strd.read_addr_stride;
loop_cnt = q_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+ ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
r_addr = q_hdr->read_addr;
for (k = 0; k < r_cnt; k++) {
- r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
@@ -1962,17 +2036,17 @@ static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
#define MD_DIRECT_ROM_WINDOW 0x42110030
#define MD_DIRECT_ROM_READ_BASE 0x42150000
-static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_value;
uint32_t i, loop_cnt;
- struct qla82xx_minidump_entry_rdrom *rom_hdr;
+ struct qla8xxx_minidump_entry_rdrom *rom_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
- rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+ rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
r_addr = rom_hdr->read_addr;
loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
@@ -1981,11 +2055,11 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
__func__, r_addr, loop_cnt));
for (i = 0; i < loop_cnt; i++) {
- qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
- (r_addr & 0xFFFF0000), 1);
- r_value = qla4_8xxx_md_rw_32(ha,
- MD_DIRECT_ROM_READ_BASE +
- (r_addr & 0x0000FFFF), 0, 0);
+ ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
+ (r_addr & 0xFFFF0000));
+ ha->isp_ops->rd_reg_indirect(ha,
+ MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
+ &r_value);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += sizeof(uint32_t);
}
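
The 82xx ROM reader splits each flash address into a 64 KB window (high half, written to MD_DIRECT_ROM_WINDOW) and an offset within it (low half, added to MD_DIRECT_ROM_READ_BASE). A sketch of just the address split; the wr()/rd() calls in the comment stand in for wr_reg_indirect()/rd_reg_indirect():

    #include <stdint.h>
    #include <stdio.h>

    #define ROM_WINDOW_REG  0x42110030u
    #define ROM_READ_BASE   0x42150000u

    int main(void)
    {
        uint32_t r_addr = 0x0003e85c;
        uint32_t window = r_addr & 0xFFFF0000;  /* select 64 KB window */
        uint32_t offset = r_addr & 0x0000FFFF;  /* offset within window */

        /* Driver: wr(ROM_WINDOW_REG, window); rd(ROM_READ_BASE + offset). */
        printf("window reg 0x%x <- 0x%x, read at 0x%x\n",
               ROM_WINDOW_REG, window, ROM_READ_BASE + offset);
        return 0;
    }
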
@@ -1997,17 +2071,17 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_value, r_data;
uint32_t i, j, loop_cnt;
- struct qla82xx_minidump_entry_rdmem *m_hdr;
+ struct qla8xxx_minidump_entry_rdmem *m_hdr;
unsigned long flags;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
- m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+ m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
r_addr = m_hdr->read_addr;
loop_cnt = m_hdr->read_data_size/16;
@@ -2035,17 +2109,19 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
write_lock_irqsave(&ha->hw_lock, flags);
for (i = 0; i < loop_cnt; i++) {
- qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
+ r_addr);
r_value = 0;
- qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
+ r_value);
r_value = MIU_TA_CTL_ENABLE;
- qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
- r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
- qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
+ r_value = MIU_TA_CTL_START_ENABLE;
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
- 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+ &r_value);
if ((r_value & MIU_TA_CTL_BUSY) == 0)
break;
}
@@ -2059,9 +2135,9 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
}
for (j = 0; j < 4; j++) {
- r_data = qla4_8xxx_md_rw_32(ha,
- MD_MIU_TEST_AGT_RDDATA[j],
- 0, 0);
+ ha->isp_ops->rd_reg_indirect(ha,
+ MD_MIU_TEST_AGT_RDDATA[j],
+ &r_data);
*data_ptr++ = cpu_to_le32(r_data);
}
@@ -2076,25 +2152,215 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
return QLA_SUCCESS;
}
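
The rdmem path reads DDR 16 bytes at a time through the MIU test agent: program the address low/high registers, write ENABLE and then START|ENABLE to the control register, poll for BUSY to clear (bounded by MAX_CTL_CHECK), and drain the four data registers. The hunk also folds MIU_TA_CTL_START | MIU_TA_CTL_ENABLE into the new MIU_TA_CTL_START_ENABLE macro. A sketch of the handshake against fake registers:

    #include <stdint.h>
    #include <stdio.h>

    #define CTL_ENABLE      0x2
    #define CTL_START       0x1
    #define CTL_BUSY        0x8
    #define MAX_CTL_CHECK   1000

    static uint32_t ctl, busy_ticks;

    static void wr(uint32_t v)
    {
        ctl = v;
        if (v & CTL_START)
            busy_ticks = 3;     /* pretend the agent works briefly */
    }

    static uint32_t rd(void)
    {
        return busy_ticks ? (busy_ticks--, ctl | CTL_BUSY) : ctl;
    }

    int main(void)
    {
        int j;

        wr(CTL_ENABLE);
        wr(CTL_START | CTL_ENABLE);     /* MIU_TA_CTL_START_ENABLE */
        for (j = 0; j < MAX_CTL_CHECK; j++)
            if (!(rd() & CTL_BUSY))
                break;
        if (j == MAX_CTL_CHECK)
            printf("agent stuck\n");
        else
            printf("done after %d polls\n", j);
        return 0;
    }
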
-static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
- struct qla82xx_minidump_entry_hdr *entry_hdr,
+static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
int index)
{
- entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+ entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
ha->host_no, index, entry_hdr->entry_type,
entry_hdr->d_ctrl.entry_capture_mask));
}
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+ uint16_t s_stride, i;
+ uint32_t *data_ptr = *d_ptr;
+ uint32_t rval = QLA_SUCCESS;
+ struct qla83xx_minidump_entry_pollrd *pollrd_hdr;
+
+ pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
+ s_addr = le32_to_cpu(pollrd_hdr->select_addr);
+ r_addr = le32_to_cpu(pollrd_hdr->read_addr);
+ s_value = le32_to_cpu(pollrd_hdr->select_value);
+ s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);
+
+ poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+ poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);
+
+ for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
+ ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+ poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+ while (1) {
+ ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ msleep(1);
+ if (--poll_wait == 0) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollrd;
+ }
+ }
+ }
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+
+ *d_ptr = data_ptr;
+
+exit_process_pollrd:
+ return rval;
+}
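+
+/*
+ * Aside: the new 83xx helpers poll differently from the 82xx paths above.
+ * Instead of a jiffies deadline they msleep(1) and count poll_wait down,
+ * erroring out when it reaches zero. A standalone sketch (usleep() stands
+ * in for msleep(), rd() for rd_reg_indirect()):
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *     #include <unistd.h>
+ *
+ *     static uint32_t rd(void)             // mask bit appears on 5th read
+ *     {
+ *             static int n;
+ *             return (++n >= 5) ? 0x1 : 0x0;
+ *     }
+ *
+ *     static int poll_set(uint32_t mask, uint32_t poll_wait)
+ *     {
+ *             for (;;) {
+ *                     if (rd() & mask)
+ *                             return 0;
+ *                     usleep(1000);        // msleep(1) in the driver
+ *                     if (--poll_wait == 0)
+ *                             return -1;   // TIMEOUT
+ *             }
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             printf("%s\n", poll_set(0x1, 100) ? "timeout" : "ok");
+ *             return 0;
+ *     }
+ */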
+
+static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+ uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+ struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
+ sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
+ sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
+ sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
+ sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
+ sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
+ read_addr = le32_to_cpu(rdmux2_hdr->read_addr);
+
+ for (i = 0; i < rdmux2_hdr->op_count; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & sel_val_mask;
+ *data_ptr++ = cpu_to_le32(t_sel_val);
+
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+ ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+ *data_ptr++ = cpu_to_le32(data);
+
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & sel_val_mask;
+ *data_ptr++ = cpu_to_le32(t_sel_val);
+
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+ ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+ *data_ptr++ = cpu_to_le32(data);
+
+ sel_val1 += rdmux2_hdr->select_value_stride;
+ sel_val2 += rdmux2_hdr->select_value_stride;
+ }
+
+ *d_ptr = data_ptr;
+}
+
+static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t poll_wait, poll_mask, r_value, data;
+ uint32_t addr_1, addr_2, value_1, value_2;
+ uint32_t *data_ptr = *d_ptr;
+ uint32_t rval = QLA_SUCCESS;
+ struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;
+
+ poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
+ addr_1 = le32_to_cpu(poll_hdr->addr_1);
+ addr_2 = le32_to_cpu(poll_hdr->addr_2);
+ value_1 = le32_to_cpu(poll_hdr->value_1);
+ value_2 = le32_to_cpu(poll_hdr->value_2);
+ poll_mask = le32_to_cpu(poll_hdr->poll_mask);
+
+ ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);
+
+ poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+ while (1) {
+ ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ msleep(1);
+ if (--poll_wait == 0) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollrdmwr;
+ }
+ }
+ }
+
+ ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
+ data &= le32_to_cpu(poll_hdr->modify_mask);
+ ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
+ ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);
+
+ poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+ while (1) {
+ ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ msleep(1);
+ if (--poll_wait == 0) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollrdmwr;
+ }
+ }
+ }
+
+ *data_ptr++ = cpu_to_le32(addr_2);
+ *data_ptr++ = cpu_to_le32(data);
+ *d_ptr = data_ptr;
+
+exit_process_pollrdmwr:
+ return rval;
+}
+
+static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t fl_addr, u32_count, rval;
+ struct qla8xxx_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
+ fl_addr = le32_to_cpu(rom_hdr->read_addr);
+ u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+ __func__, fl_addr, u32_count));
+
+ rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
+ (u8 *)(data_ptr), u32_count);
+
+ if (rval == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Flash Read Error, Count=%d\n",
+ __func__, u32_count);
+ goto exit_process_rdrom;
+ }
+
+ data_ptr += u32_count;
+ *d_ptr = data_ptr;
+
+exit_process_rdrom:
+ return rval;
+}
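
The 83xx RDROM handler above converts read_data_size from bytes to uint32_t units before handing the whole range to qla4_83xx_lockless_flash_read_u32(), then advances the dump cursor by that many words. A sketch of the byte/word bookkeeping, with a hypothetical flash_read() in place of the driver's reader:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical flash reader: fills count dwords starting at addr. */
    static int flash_read(uint32_t addr, uint8_t *buf, uint32_t count)
    {
        memset(buf, 0xab, count * sizeof(uint32_t));
        return 0;       /* 0 == success */
    }

    int main(void)
    {
        uint32_t dump[64], *data_ptr = dump;
        uint32_t read_data_size = 32;   /* bytes, from the template entry */
        uint32_t u32_count = read_data_size / sizeof(uint32_t);

        if (flash_read(0x1000, (uint8_t *)data_ptr, u32_count) == 0)
            data_ptr += u32_count;      /* advance in words, not bytes */
        printf("cursor advanced %ld words\n", (long)(data_ptr - dump));
        return 0;
    }
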
+
/**
- * qla82xx_collect_md_data - Retrieve firmware minidump data.
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
* @ha: pointer to adapter structure
**/
static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
{
int num_entry_hdr = 0;
- struct qla82xx_minidump_entry_hdr *entry_hdr;
+ struct qla8xxx_minidump_entry_hdr *entry_hdr;
struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
uint32_t *data_ptr;
uint32_t data_collected = 0;
@@ -2130,10 +2396,14 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
timestamp = (u32)(jiffies_to_msecs(now) / 1000);
tmplt_hdr->driver_timestamp = timestamp;
- entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
(((uint8_t *)ha->fw_dump_tmplt_hdr) +
tmplt_hdr->first_entry_offset);
+ if (is_qla8032(ha))
+ tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
+ tmplt_hdr->ocm_window_reg[ha->func_num];
+
/* Walk through the entry headers - validate/perform required action */
for (i = 0; i < num_entry_hdr; i++) {
if (data_collected >= ha->fw_dump_size) {
@@ -2146,7 +2416,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
if (!(entry_hdr->d_ctrl.entry_capture_mask &
ha->fw_dump_capture_mask)) {
entry_hdr->d_ctrl.driver_flags |=
- QLA82XX_DBG_SKIPPED_FLAG;
+ QLA8XXX_DBG_SKIPPED_FLAG;
goto skip_nxt_entry;
}
@@ -2159,65 +2429,105 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
* debug data
*/
switch (entry_hdr->entry_type) {
- case QLA82XX_RDEND:
- ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ case QLA8XXX_RDEND:
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
- case QLA82XX_CNTRL:
+ case QLA8XXX_CNTRL:
rval = qla4_8xxx_minidump_process_control(ha,
entry_hdr);
if (rval != QLA_SUCCESS) {
- ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
goto md_failed;
}
break;
- case QLA82XX_RDCRB:
+ case QLA8XXX_RDCRB:
qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
&data_ptr);
break;
- case QLA82XX_RDMEM:
+ case QLA8XXX_RDMEM:
rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
&data_ptr);
if (rval != QLA_SUCCESS) {
- ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
goto md_failed;
}
break;
- case QLA82XX_BOARD:
- case QLA82XX_RDROM:
- qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
- &data_ptr);
+ case QLA8XXX_BOARD:
+ case QLA8XXX_RDROM:
+ if (is_qla8022(ha)) {
+ qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
+ &data_ptr);
+ } else if (is_qla8032(ha)) {
+ rval = qla4_83xx_minidump_process_rdrom(ha,
+ entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha,
+ entry_hdr,
+ i);
+ }
break;
- case QLA82XX_L2DTG:
- case QLA82XX_L2ITG:
- case QLA82XX_L2DAT:
- case QLA82XX_L2INS:
+ case QLA8XXX_L2DTG:
+ case QLA8XXX_L2ITG:
+ case QLA8XXX_L2DAT:
+ case QLA8XXX_L2INS:
rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
&data_ptr);
if (rval != QLA_SUCCESS) {
- ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
goto md_failed;
}
break;
- case QLA82XX_L1DAT:
- case QLA82XX_L1INS:
+ case QLA8XXX_L1DTG:
+ case QLA8XXX_L1ITG:
+ case QLA8XXX_L1DAT:
+ case QLA8XXX_L1INS:
qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
&data_ptr);
break;
- case QLA82XX_RDOCM:
+ case QLA8XXX_RDOCM:
qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
&data_ptr);
break;
- case QLA82XX_RDMUX:
+ case QLA8XXX_RDMUX:
qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
&data_ptr);
break;
- case QLA82XX_QUEUE:
+ case QLA8XXX_QUEUE:
qla4_8xxx_minidump_process_queue(ha, entry_hdr,
&data_ptr);
break;
- case QLA82XX_RDNOP:
+ case QLA83XX_POLLRD:
+ if (!is_qla8032(ha)) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+ rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA83XX_RDMUX2:
+ if (!is_qla8032(ha)) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+ qla83xx_minidump_process_rdmux2(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA83XX_POLLRDMWR:
+ if (!is_qla8032(ha)) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+ rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8XXX_RDNOP:
default:
- ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
@@ -2226,7 +2536,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
ha->fw_dump_tmplt_size));
skip_nxt_entry:
/* next entry in the template */
- entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
(((uint8_t *)entry_hdr) +
entry_hdr->entry_size);
}
@@ -2266,33 +2576,45 @@ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
}
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
+{
+ if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+ !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+ if (!qla4_8xxx_collect_md_data(ha)) {
+ qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+ set_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
+ __func__);
+ }
+ }
+}
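
The new qla4_8xxx_get_minidump() hoists the collect-and-notify logic out of device_bootstrap(): collect only when the module parameter enables it, firmware recovery is in progress, and no dump has been taken yet; on success emit a uevent and latch the dumped flag. A sketch of the same gate with plain booleans (collect_md_data() is a stand-in that, like the driver's, returns 0 on success):

    #include <stdbool.h>
    #include <stdio.h>

    static bool enable_md = true, fw_recovery = true, already_dumped;

    static int collect_md_data(void) { return 0; }      /* 0 == success */

    static void get_minidump(void)
    {
        if (enable_md && fw_recovery && !already_dumped) {
            if (!collect_md_data()) {
                printf("uevent: FW_DUMP\n");    /* notify userspace */
                already_dumped = true;
            } else {
                printf("unable to collect minidump\n");
            }
        }
    }

    int main(void)
    {
        get_minidump();
        get_minidump();     /* second call is a no-op: already dumped */
        return 0;
    }
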
+
/**
* qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
* @ha: pointer to adapter structure
*
* Note: IDC lock must be held upon entry
**/
-static int
-qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
{
int rval = QLA_ERROR;
int i, timeout;
- uint32_t old_count, count;
+ uint32_t old_count, count, idc_ctrl;
int need_reset = 0, peg_stuck = 1;
- need_reset = qla4_8xxx_need_reset(ha);
-
- old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ need_reset = ha->isp_ops->need_reset(ha);
+ old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
for (i = 0; i < 10; i++) {
timeout = msleep_interruptible(200);
if (timeout) {
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
return rval;
}
- count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
if (count != old_count)
peg_stuck = 0;
}
@@ -2300,13 +2622,13 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
if (need_reset) {
/* We are trying to perform a recovery here. */
if (peg_stuck)
- qla4_8xxx_rom_lock_recovery(ha);
+ ha->isp_ops->rom_lock_recovery(ha);
goto dev_initialize;
} else {
/* Start of day for this ha context. */
if (peg_stuck) {
/* Either we are the first or recovery in progress. */
- qla4_8xxx_rom_lock_recovery(ha);
+ ha->isp_ops->rom_lock_recovery(ha);
goto dev_initialize;
} else {
/* Firmware already running. */
@@ -2318,46 +2640,53 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
dev_initialize:
/* set to DEV_INITIALIZING */
ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_INITIALIZING);
- /* Driver that sets device state to initializating sets IDC version */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
-
- qla4_8xxx_idc_unlock(ha);
- if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
- !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
- if (!qla4_8xxx_collect_md_data(ha)) {
- qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
- } else {
- ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
- clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ /*
+ * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
+ * device goes to INIT state.
+ */
+ if (is_qla8032(ha)) {
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+ (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+ set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
}
}
- rval = qla4_8xxx_try_start_fw(ha);
- qla4_8xxx_idc_lock(ha);
+
+ ha->isp_ops->idc_unlock(ha);
+
+ if (is_qla8022(ha))
+ qla4_8xxx_get_minidump(ha);
+
+ rval = ha->isp_ops->restart_firmware(ha);
+ ha->isp_ops->idc_lock(ha);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
qla4_8xxx_clear_drv_active(ha);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
return rval;
}
dev_ready:
ql4_printk(KERN_INFO, ha, "HW State: READY\n");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
return rval;
}
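
device_bootstrap() decides whether the firmware pegs are alive by sampling QLA8XXX_PEG_ALIVE_COUNTER, sleeping ~200 ms between ten samples, and declaring the peg stuck only if the counter never moves; a stuck peg triggers rom_lock_recovery() before reinitialization. A sketch of the sample-and-compare probe, with a fake counter that a live firmware would be incrementing:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical alive counter; live firmware bumps it in the background. */
    static uint32_t read_alive_counter(void)
    {
        static uint32_t c;
        return c += 7;
    }

    int main(void)
    {
        uint32_t old_count = read_alive_counter(), count;
        int i, peg_stuck = 1;

        for (i = 0; i < 10; i++) {
            /* driver: msleep_interruptible(200) between samples */
            count = read_alive_counter();
            if (count != old_count)
                peg_stuck = 0;
        }
        printf("peg %s\n",
               peg_stuck ? "stuck: run ROM lock recovery" : "alive");
        return 0;
    }
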
/**
- * qla4_8xxx_need_reset_handler - Code to start reset sequence
+ * qla4_82xx_need_reset_handler - Code to start reset sequence
* @ha: pointer to adapter structure
*
* Note: IDC lock must be held upon entry
**/
static void
-qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
+qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
{
uint32_t dev_state, drv_state, drv_active;
uint32_t active_mask = 0xFFFFFFFF;
@@ -2367,12 +2696,12 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
"Performing ISP error recovery\n");
if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
- qla4_8xxx_idc_unlock(ha);
+ qla4_82xx_idc_unlock(ha);
ha->isp_ops->disable_intrs(ha);
- qla4_8xxx_idc_lock(ha);
+ qla4_82xx_idc_lock(ha);
}
- if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s(%ld): reset acknowledged\n",
__func__, ha->host_no));
@@ -2384,8 +2713,8 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
/* wait for 10 seconds for reset ack from all functions */
reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
- drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
- drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
ql4_printk(KERN_INFO, ha,
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
@@ -2403,31 +2732,31 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
* When reset_owner times out, check which functions
* acked/did not ack
*/
- if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
ql4_printk(KERN_INFO, ha,
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
__func__, ha->host_no, drv_state,
drv_active);
}
- qla4_8xxx_idc_unlock(ha);
+ qla4_82xx_idc_unlock(ha);
msleep(1000);
- qla4_8xxx_idc_lock(ha);
+ qla4_82xx_idc_lock(ha);
- drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
- drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
}
/* Clear RESET OWNER as we are not going to use it any further */
- clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+ clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
- if (dev_state != QLA82XX_DEV_INITIALIZING) {
+ if (dev_state != QLA8XXX_DEV_INITIALIZING) {
ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
qla4_8xxx_set_rst_ready(ha);
}
}
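
need_reset_handler() waits for every active function to acknowledge the reset: QLA82XX_CRB_DRV_ACTIVE holds one bit group per function that is up, QLA82XX_CRB_DRV_STATE one per function that has acked, and the owner re-reads both each second until they match or the 10 s window expires. The masked comparison below is a plausible reconstruction of the wait condition elided from the hunk, not a quote of the driver:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t drv_active = 0x5;      /* functions 0 and 2 are up */
        uint32_t drv_state  = 0x1;      /* only function 0 acked so far */
        uint32_t active_mask = 0xFFFFFFFF;

        /* The reset owner can exclude itself from the wait via active_mask. */
        if ((drv_state & (drv_active & active_mask)) ==
            (drv_active & active_mask))
            printf("all functions acked, proceed to COLD\n");
        else
            printf("still waiting: state=0x%x active=0x%x\n",
                   drv_state, drv_active);
        return 0;
    }
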
@@ -2439,9 +2768,104 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
void
qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
{
- qla4_8xxx_idc_lock(ha);
+ ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_qsnt_ready(ha);
- qla4_8xxx_idc_unlock(ha);
+ ha->isp_ops->idc_unlock(ha);
+}
+
+static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ if (drv_active == (1 << (ha->func_num * 4))) {
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
+ QLA82XX_IDC_VERSION);
+ ql4_printk(KERN_INFO, ha,
+ "%s: IDC version updated to %d\n", __func__,
+ QLA82XX_IDC_VERSION);
+ } else {
+ idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+ if (QLA82XX_IDC_VERSION != idc_ver) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+ __func__, QLA82XX_IDC_VERSION, idc_ver);
+ }
+ }
+}
+
+static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ if (drv_active == (1 << ha->func_num)) {
+ idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+ idc_ver &= (~0xFF);
+ idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
+ ql4_printk(KERN_INFO, ha,
+ "%s: IDC version updated to %d\n", __func__,
+ idc_ver);
+ } else {
+ idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+ idc_ver &= 0xFF;
+ if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+ __func__, QLA83XX_IDC_VER_MAJ_VALUE,
+ idc_ver);
+ rval = QLA_ERROR;
+ goto exit_set_idc_ver;
+ }
+ }
+
+ /* Update IDC_MINOR_VERSION */
+ idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
+ idc_ver &= ~(0x03 << (ha->func_num * 2));
+ idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
+ qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+ return rval;
+}
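+
+/*
+ * Aside: qla4_83xx_set_idc_ver() packs the IDC version across two registers:
+ * the first function to load publishes the major version in the low byte of
+ * one register (later loaders only verify it), and every function slots a
+ * 2-bit minor version into another register at func_num * 2. A standalone
+ * sketch of the bit packing (VER_MAJ/VER_MIN values are illustrative):
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     #define VER_MAJ 1
+ *     #define VER_MIN 1
+ *
+ *     int main(void)
+ *     {
+ *             uint32_t idc_major = 0, idc_minor = 0;
+ *             int func_num = 2;
+ *
+ *             // low byte carries the major version
+ *             idc_major = (idc_major & ~0xFF) | VER_MAJ;
+ *
+ *             // each function owns a 2-bit minor field at func_num * 2
+ *             idc_minor &= ~(0x03 << (func_num * 2));
+ *             idc_minor |= (VER_MIN << (func_num * 2));
+ *
+ *             printf("major=0x%x minor=0x%x\n", idc_major, idc_minor);
+ *             return 0;
+ *     }
+ */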
+
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+
+ if (test_bit(AF_INIT_DONE, &ha->flags))
+ goto exit_update_idc_reg;
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_set_drv_active(ha);
+
+ /*
+ * If we are the first driver to load and
+ * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
+ */
+ if (is_qla8032(ha)) {
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
+ qla4_83xx_clear_idc_dontreset(ha);
+ }
+
+ if (is_qla8022(ha)) {
+ qla4_82xx_set_idc_ver(ha);
+ } else if (is_qla8032(ha)) {
+ rval = qla4_83xx_set_idc_ver(ha);
+ if (rval == QLA_ERROR)
+ qla4_8xxx_clear_drv_active(ha);
+ }
+
+ ha->isp_ops->idc_unlock(ha);
+
+exit_update_idc_reg:
+ return rval;
}
/**
@@ -2456,13 +2880,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
int rval = QLA_SUCCESS;
unsigned long dev_init_timeout;
- if (!test_bit(AF_INIT_DONE, &ha->flags)) {
- qla4_8xxx_idc_lock(ha);
- qla4_8xxx_set_drv_active(ha);
- qla4_8xxx_idc_unlock(ha);
- }
+ rval = qla4_8xxx_update_idc_reg(ha);
+ if (rval == QLA_ERROR)
+ goto exit_state_handler;
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
dev_state, dev_state < MAX_STATES ?
qdev_state[dev_state] : "Unknown"));
@@ -2470,7 +2892,7 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
- qla4_8xxx_idc_lock(ha);
+ ha->isp_ops->idc_lock(ha);
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
@@ -2479,65 +2901,75 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
DRIVER_NAME,
dev_state, dev_state < MAX_STATES ?
qdev_state[dev_state] : "Unknown");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
}
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
dev_state, dev_state < MAX_STATES ?
qdev_state[dev_state] : "Unknown");
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
- case QLA82XX_DEV_READY:
+ case QLA8XXX_DEV_READY:
goto exit;
- case QLA82XX_DEV_COLD:
+ case QLA8XXX_DEV_COLD:
rval = qla4_8xxx_device_bootstrap(ha);
goto exit;
- case QLA82XX_DEV_INITIALIZING:
- qla4_8xxx_idc_unlock(ha);
+ case QLA8XXX_DEV_INITIALIZING:
+ ha->isp_ops->idc_unlock(ha);
msleep(1000);
- qla4_8xxx_idc_lock(ha);
+ ha->isp_ops->idc_lock(ha);
break;
- case QLA82XX_DEV_NEED_RESET:
- if (!ql4xdontresethba) {
- qla4_8xxx_need_reset_handler(ha);
- /* Update timeout value after need
- * reset handler */
- dev_init_timeout = jiffies +
- (ha->nx_dev_init_timeout * HZ);
- } else {
- qla4_8xxx_idc_unlock(ha);
- msleep(1000);
- qla4_8xxx_idc_lock(ha);
+ case QLA8XXX_DEV_NEED_RESET:
+ /*
+ * For ISP8324, if NEED_RESET is set by any driver,
+ * it should be honored, irrespective of IDC_CTRL
+ * DONTRESET_BIT0
+ */
+ if (is_qla8032(ha)) {
+ qla4_83xx_need_reset_handler(ha);
+ } else if (is_qla8022(ha)) {
+ if (!ql4xdontresethba) {
+ qla4_82xx_need_reset_handler(ha);
+ /* Update timeout value after need
+ * reset handler */
+ dev_init_timeout = jiffies +
+ (ha->nx_dev_init_timeout * HZ);
+ } else {
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+ }
}
break;
- case QLA82XX_DEV_NEED_QUIESCENT:
+ case QLA8XXX_DEV_NEED_QUIESCENT:
/* idc locked/unlocked in handler */
qla4_8xxx_need_qsnt_handler(ha);
break;
- case QLA82XX_DEV_QUIESCENT:
- qla4_8xxx_idc_unlock(ha);
+ case QLA8XXX_DEV_QUIESCENT:
+ ha->isp_ops->idc_unlock(ha);
msleep(1000);
- qla4_8xxx_idc_lock(ha);
+ ha->isp_ops->idc_lock(ha);
break;
- case QLA82XX_DEV_FAILED:
- qla4_8xxx_idc_unlock(ha);
+ case QLA8XXX_DEV_FAILED:
+ ha->isp_ops->idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
- qla4_8xxx_idc_lock(ha);
+ ha->isp_ops->idc_lock(ha);
goto exit;
default:
- qla4_8xxx_idc_unlock(ha);
+ ha->isp_ops->idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
- qla4_8xxx_idc_lock(ha);
+ ha->isp_ops->idc_lock(ha);
goto exit;
}
}
exit:
- qla4_8xxx_idc_unlock(ha);
+ ha->isp_ops->idc_unlock(ha);
+exit_state_handler:
return rval;
}
@@ -2546,8 +2978,13 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
int retval;
/* clear the interrupt */
- writel(0, &ha->qla4_8xxx_reg->host_int);
- readl(&ha->qla4_8xxx_reg->host_int);
+ if (is_qla8032(ha)) {
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ readl(&ha->qla4_83xx_reg->risc_intr);
+ } else if (is_qla8022(ha)) {
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ readl(&ha->qla4_82xx_reg->host_int);
+ }
retval = qla4_8xxx_device_state_handler(ha);
@@ -2581,13 +3018,13 @@ flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
}
static uint32_t *
-qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
+qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
uint32_t faddr, uint32_t length)
{
uint32_t i;
uint32_t val;
int loops = 0;
- while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) {
+ while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
cond_resched();
loops++;
@@ -2599,7 +3036,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
/* Dword reads to flash. */
for (i = 0; i < length/4; i++, faddr += 4) {
- if (qla4_8xxx_do_rom_fast_read(ha, faddr, &val)) {
+ if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
ql4_printk(KERN_WARNING, ha,
"Do ROM fast read failed\n");
goto done_read;
@@ -2608,7 +3045,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
}
done_read:
- qla4_8xxx_rom_unlock(ha);
+ qla4_82xx_rom_unlock(ha);
return dwptr;
}
@@ -2616,10 +3053,10 @@ done_read:
* Address and length are byte address
**/
static uint8_t *
-qla4_8xxx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
uint32_t offset, uint32_t length)
{
- qla4_8xxx_read_flash_data(ha, (uint32_t *)buf, offset, length);
+ qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
return buf;
}
@@ -2646,7 +3083,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
const char *loc, *locations[] = { "DEF", "FLT" };
uint16_t *wptr;
uint16_t cnt, chksum;
- uint32_t start;
+ uint32_t start, status;
struct qla_flt_header *flt;
struct qla_flt_region *region;
struct ql82xx_hw_data *hw = &ha->hw;
@@ -2655,8 +3092,18 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
wptr = (uint16_t *)ha->request_ring;
flt = (struct qla_flt_header *)ha->request_ring;
region = (struct qla_flt_region *)&flt[1];
- qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
- flt_addr << 2, OPTROM_BURST_SIZE);
+
+ if (is_qla8022(ha)) {
+ qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+ flt_addr << 2, OPTROM_BURST_SIZE);
+ } else if (is_qla8032(ha)) {
+ status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
+ (uint8_t *)ha->request_ring,
+ 0x400);
+ if (status != QLA_SUCCESS)
+ goto no_flash_data;
+ }
+
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (flt->version != __constant_cpu_to_le16(1)) {
@@ -2732,7 +3179,7 @@ done:
}
static void
-qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
+qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
{
#define FLASH_BLK_SIZE_4K 0x1000
#define FLASH_BLK_SIZE_32K 0x8000
@@ -2750,7 +3197,7 @@ qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
wptr = (uint16_t *)ha->request_ring;
fdt = (struct qla_fdt_layout *)ha->request_ring;
- qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+ qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
if (*wptr == __constant_cpu_to_le16(0xffff))
@@ -2799,7 +3246,7 @@ done:
}
static void
-qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
+qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
{
#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
uint32_t *wptr;
@@ -2807,7 +3254,7 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
if (!is_qla8022(ha))
return;
wptr = (uint32_t *)ha->request_ring;
- qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+ qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
QLA82XX_IDC_PARAM_ADDR , 8);
if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
@@ -2825,6 +3272,39 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
return;
}
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int in_count)
+{
+ int i;
+
+ /* Load all mailbox registers, except mailbox 0. */
+ for (i = 1; i < in_count; i++)
+ writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);
+
+ /* Wakeup firmware */
+ writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
+ readl(&ha->qla4_82xx_reg->mailbox_in[0]);
+ writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
+ readl(&ha->qla4_82xx_reg->hint);
+}
+
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+ int intr_status;
+
+ intr_status = readl(&ha->qla4_82xx_reg->host_int);
+ if (intr_status & ISRX_82XX_RISC_INT) {
+ ha->mbox_status_count = out_count;
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+ if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+ test_bit(AF_INTx_ENABLED, &ha->flags))
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+ 0xfbff);
+ }
+}
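
qla4_82xx_queue_mbox_cmd() loads mailboxes 1..n-1 first and writes mailbox 0 last, because that final write (followed by HINT_MBX_INT_PENDING in the hint register) is what wakes the firmware; the readl() after each writel() flushes posted PCI writes. The interrupt side then checks host_int for ISRX_82XX_RISC_INT before running the ISR. A sketch of the ordering against a fake register file (the 0x2 doorbell value is illustrative, not the real HINT_MBX_INT_PENDING):

    #include <stdint.h>
    #include <stdio.h>

    #define MBOX_COUNT 4

    static uint32_t mailbox_in[MBOX_COUNT], hint;

    static void queue_mbox_cmd(const uint32_t *cmd, int in_count)
    {
        int i;

        /* Payload first: firmware must not see mailbox 0 early. */
        for (i = 1; i < in_count; i++)
            mailbox_in[i] = cmd[i];

        mailbox_in[0] = cmd[0];     /* opcode last... */
        hint = 0x2;                 /* ...then ring the doorbell */
    }

    int main(void)
    {
        uint32_t cmd[MBOX_COUNT] = { 0x56 /* opcode */, 1, 2, 3 };

        queue_mbox_cmd(cmd, MBOX_COUNT);
        printf("mbox0=0x%x hint=0x%x\n", mailbox_in[0], hint);
        return 0;
    }
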
+
int
qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
{
@@ -2836,8 +3316,12 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
return ret;
qla4_8xxx_get_flt_info(ha, flt_addr);
- qla4_8xxx_get_fdt_info(ha);
- qla4_8xxx_get_idc_param(ha);
+ if (is_qla8022(ha)) {
+ qla4_82xx_get_fdt_info(ha);
+ qla4_82xx_get_idc_param(ha);
+ } else if (is_qla8032(ha)) {
+ qla4_83xx_get_idc_param(ha);
+ }
return QLA_SUCCESS;
}
@@ -2871,36 +3355,36 @@ qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
}
/**
- * qla4_8xxx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
* @ha: pointer to host adapter structure.
**/
int
-qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
+qla4_82xx_isp_reset(struct scsi_qla_host *ha)
{
int rval;
uint32_t dev_state;
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_82xx_idc_lock(ha);
+ dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_READY) {
+ if (dev_state == QLA8XXX_DEV_READY) {
ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_NEED_RESET);
- set_bit(AF_82XX_RST_OWNER, &ha->flags);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
} else
ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
- qla4_8xxx_idc_unlock(ha);
+ qla4_82xx_idc_unlock(ha);
rval = qla4_8xxx_device_state_handler(ha);
- qla4_8xxx_idc_lock(ha);
+ qla4_82xx_idc_lock(ha);
qla4_8xxx_clear_rst_ready(ha);
- qla4_8xxx_idc_unlock(ha);
+ qla4_82xx_idc_unlock(ha);
if (rval == QLA_SUCCESS) {
- ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
+ ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n");
clear_bit(AF_FW_RECOVERY, &ha->flags);
}
@@ -2981,8 +3465,7 @@ exit_validate_mac82:
/* Interrupt handling helpers. */
-static int
-qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
+int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3003,8 +3486,7 @@ qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}
-static int
-qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
+int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3027,26 +3509,26 @@ qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
}
void
-qla4_8xxx_enable_intrs(struct scsi_qla_host *ha)
+qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
{
qla4_8xxx_mbx_intr_enable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - reset */
- qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
spin_unlock_irq(&ha->hardware_lock);
set_bit(AF_INTERRUPTS_ON, &ha->flags);
}
void
-qla4_8xxx_disable_intrs(struct scsi_qla_host *ha)
+qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
{
if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
qla4_8xxx_mbx_intr_disable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - set */
- qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
spin_unlock_irq(&ha->hardware_lock);
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 30258479f100..9dc0bbfe50d5 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -25,6 +25,8 @@
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
+#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
+#define CRB_CMDPEG_CHECK_DELAY 500
#define qla82xx_get_temp_val(x) ((x) >> 16)
#define qla82xx_get_temp_state(x) ((x) & 0xffff)
@@ -490,8 +492,8 @@ enum {
* Base addresses of major components on-chip.
* ====================== BASE ADDRESSES ON-CHIP ======================
*/
-#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL)
-#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLA8XXX_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA8XXX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
/* Imbus address bit used to indicate a host address. This bit is
* eliminated by the pcie bar and bar select before presentation
@@ -500,14 +502,15 @@ enum {
#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
-#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL)
-#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL)
-#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
-#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
-#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLA8XXX_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA8XXX_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA8XXX_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA8XXX_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA8XXX_ADDR_QDR_NET (0x0000000300000000ULL)
#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+#define QLA8XXX_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
@@ -517,6 +520,10 @@ enum {
#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
+/* PCI Windowing for DDR regions. */
+#define QLA8XXX_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+
/*
* Register offsets for MN
*/
@@ -540,6 +547,11 @@ enum {
#define MIU_TA_CTL_WRITE 4
#define MIU_TA_CTL_BUSY 8
+#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |\
+ MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
/*CAM RAM */
# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
@@ -565,20 +577,53 @@ enum {
/* Driver Coexistence Defines */
#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
-#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
-#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
+
+enum qla_regs {
+ QLA8XXX_PEG_HALT_STATUS1 = 0,
+ QLA8XXX_PEG_HALT_STATUS2,
+ QLA8XXX_PEG_ALIVE_COUNTER,
+ QLA8XXX_CRB_DRV_ACTIVE,
+ QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_CRB_DRV_STATE,
+ QLA8XXX_CRB_DRV_SCRATCH,
+ QLA8XXX_CRB_DEV_PART_INFO,
+ QLA8XXX_CRB_DRV_IDC_VERSION,
+ QLA8XXX_FW_VERSION_MAJOR,
+ QLA8XXX_FW_VERSION_MINOR,
+ QLA8XXX_FW_VERSION_SUB,
+ QLA8XXX_CRB_CMDPEG_STATE,
+ QLA8XXX_CRB_TEMP_STATE,
+};
+
+static const uint32_t qla4_82xx_reg_tbl[] = {
+ QLA82XX_PEG_HALT_STATUS1,
+ QLA82XX_PEG_HALT_STATUS2,
+ QLA82XX_PEG_ALIVE_COUNTER,
+ QLA82XX_CRB_DRV_ACTIVE,
+ QLA82XX_CRB_DEV_STATE,
+ QLA82XX_CRB_DRV_STATE,
+ QLA82XX_CRB_DRV_SCRATCH,
+ QLA82XX_CRB_DEV_PART_INFO,
+ QLA82XX_CRB_DRV_IDC_VERSION,
+ QLA82XX_FW_VERSION_MAJOR,
+ QLA82XX_FW_VERSION_MINOR,
+ QLA82XX_FW_VERSION_SUB,
+ CRB_CMDPEG_STATE,
+ CRB_TEMP_STATE,
+};
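+
+/*
+ * Aside: enum qla_regs names each coexistence register abstractly, and
+ * qla4_82xx_reg_tbl[] maps those names to 82xx CRB offsets; an 83xx table
+ * with the same indices lets qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE)
+ * resolve to the right offset per chip. A standalone sketch of the lookup
+ * (the offsets below are illustrative, not the real register map):
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     enum regs { REG_DEV_STATE = 0, REG_DRV_ACTIVE, REG_MAX };
+ *
+ *     // per-chip offset tables, indexed by the shared enum
+ *     static const uint32_t tbl_82xx[REG_MAX] = { 0x140, 0x138 };
+ *     static const uint32_t tbl_83xx[REG_MAX] = { 0x3640, 0x3688 };
+ *
+ *     struct host { const uint32_t *reg_tbl; };
+ *
+ *     static uint32_t rd_direct(struct host *ha, enum regs r)
+ *     {
+ *             printf("read CRB offset 0x%x\n", ha->reg_tbl[r]);
+ *             return 0;            // driver does the MMIO here
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             struct host ha82 = { tbl_82xx }, ha83 = { tbl_83xx };
+ *
+ *             rd_direct(&ha82, REG_DEV_STATE);  // same call site...
+ *             rd_direct(&ha83, REG_DEV_STATE);  // ...different offset
+ *             return 0;
+ *     }
+ */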
/* Every driver should use these Device State */
-#define QLA82XX_DEV_COLD 1
-#define QLA82XX_DEV_INITIALIZING 2
-#define QLA82XX_DEV_READY 3
-#define QLA82XX_DEV_NEED_RESET 4
-#define QLA82XX_DEV_NEED_QUIESCENT 5
-#define QLA82XX_DEV_FAILED 6
-#define QLA82XX_DEV_QUIESCENT 7
+#define QLA8XXX_DEV_COLD 1
+#define QLA8XXX_DEV_INITIALIZING 2
+#define QLA8XXX_DEV_READY 3
+#define QLA8XXX_DEV_NEED_RESET 4
+#define QLA8XXX_DEV_NEED_QUIESCENT 5
+#define QLA8XXX_DEV_FAILED 6
+#define QLA8XXX_DEV_QUIESCENT 7
#define MAX_STATES 8 /* Increment if new state added */
#define QLA82XX_IDC_VERSION 0x1
@@ -795,47 +840,51 @@ struct crb_addr_pair {
/* Minidump related */
/* Entry Type Defines */
-#define QLA82XX_RDNOP 0
-#define QLA82XX_RDCRB 1
-#define QLA82XX_RDMUX 2
-#define QLA82XX_QUEUE 3
-#define QLA82XX_BOARD 4
-#define QLA82XX_RDOCM 6
-#define QLA82XX_PREGS 7
-#define QLA82XX_L1DTG 8
-#define QLA82XX_L1ITG 9
-#define QLA82XX_L1DAT 11
-#define QLA82XX_L1INS 12
-#define QLA82XX_L2DTG 21
-#define QLA82XX_L2ITG 22
-#define QLA82XX_L2DAT 23
-#define QLA82XX_L2INS 24
-#define QLA82XX_RDROM 71
-#define QLA82XX_RDMEM 72
-#define QLA82XX_CNTRL 98
-#define QLA82XX_RDEND 255
+#define QLA8XXX_RDNOP 0
+#define QLA8XXX_RDCRB 1
+#define QLA8XXX_RDMUX 2
+#define QLA8XXX_QUEUE 3
+#define QLA8XXX_BOARD 4
+#define QLA8XXX_RDOCM 6
+#define QLA8XXX_PREGS 7
+#define QLA8XXX_L1DTG 8
+#define QLA8XXX_L1ITG 9
+#define QLA8XXX_L1DAT 11
+#define QLA8XXX_L1INS 12
+#define QLA8XXX_L2DTG 21
+#define QLA8XXX_L2ITG 22
+#define QLA8XXX_L2DAT 23
+#define QLA8XXX_L2INS 24
+#define QLA83XX_POLLRD 35
+#define QLA83XX_RDMUX2 36
+#define QLA83XX_POLLRDMWR 37
+#define QLA8XXX_RDROM 71
+#define QLA8XXX_RDMEM 72
+#define QLA8XXX_CNTRL 98
+#define QLA83XX_TLHDR 99
+#define QLA8XXX_RDEND 255
/* Opcodes for Control Entries.
* These Flags are bit fields.
*/
-#define QLA82XX_DBG_OPCODE_WR 0x01
-#define QLA82XX_DBG_OPCODE_RW 0x02
-#define QLA82XX_DBG_OPCODE_AND 0x04
-#define QLA82XX_DBG_OPCODE_OR 0x08
-#define QLA82XX_DBG_OPCODE_POLL 0x10
-#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
-#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
-#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
+#define QLA8XXX_DBG_OPCODE_WR 0x01
+#define QLA8XXX_DBG_OPCODE_RW 0x02
+#define QLA8XXX_DBG_OPCODE_AND 0x04
+#define QLA8XXX_DBG_OPCODE_OR 0x08
+#define QLA8XXX_DBG_OPCODE_POLL 0x10
+#define QLA8XXX_DBG_OPCODE_RDSTATE 0x20
+#define QLA8XXX_DBG_OPCODE_WRSTATE 0x40
+#define QLA8XXX_DBG_OPCODE_MDSTATE 0x80
/* Driver Flags */
-#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
-#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
+#define QLA8XXX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
+#define QLA8XXX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
* mismatch */
/* Driver_code is for driver to write some info about the entry
* currently not used.
*/
-struct qla82xx_minidump_entry_hdr {
+struct qla8xxx_minidump_entry_hdr {
uint32_t entry_type;
uint32_t entry_size;
uint32_t entry_capture_size;
@@ -848,8 +897,8 @@ struct qla82xx_minidump_entry_hdr {
};
/* Read CRB entry header */
-struct qla82xx_minidump_entry_crb {
- struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_crb {
+ struct qla8xxx_minidump_entry_hdr h;
uint32_t addr;
struct {
uint8_t addr_stride;
@@ -871,8 +920,8 @@ struct qla82xx_minidump_entry_crb {
uint32_t value_3;
};
-struct qla82xx_minidump_entry_cache {
- struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_cache {
+ struct qla8xxx_minidump_entry_hdr h;
uint32_t tag_reg_addr;
struct {
uint16_t tag_value_stride;
@@ -895,8 +944,8 @@ struct qla82xx_minidump_entry_cache {
};
/* Read OCM */
-struct qla82xx_minidump_entry_rdocm {
- struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_rdocm {
+ struct qla8xxx_minidump_entry_hdr h;
uint32_t rsvd_0;
uint32_t rsvd_1;
uint32_t data_size;
@@ -908,24 +957,24 @@ struct qla82xx_minidump_entry_rdocm {
};
/* Read Memory */
-struct qla82xx_minidump_entry_rdmem {
- struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_rdmem {
+ struct qla8xxx_minidump_entry_hdr h;
uint32_t rsvd[6];
uint32_t read_addr;
uint32_t read_data_size;
};
/* Read ROM */
-struct qla82xx_minidump_entry_rdrom {
- struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_rdrom {
+ struct qla8xxx_minidump_entry_hdr h;
uint32_t rsvd[6];
uint32_t read_addr;
uint32_t read_data_size;
};
/* Mux entry */
-struct qla82xx_minidump_entry_mux {
- struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_mux {
+ struct qla8xxx_minidump_entry_hdr h;
uint32_t select_addr;
uint32_t rsvd_0;
uint32_t data_size;
@@ -937,8 +986,8 @@ struct qla82xx_minidump_entry_mux {
};
/* Queue entry */
-struct qla82xx_minidump_entry_queue {
- struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_queue {
+ struct qla8xxx_minidump_entry_hdr h;
uint32_t select_addr;
struct {
uint16_t queue_id_stride;
@@ -956,23 +1005,6 @@ struct qla82xx_minidump_entry_queue {
} rd_strd;
};
-#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
-#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
-#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
-#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
-#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
-#define QLA82XX_MINIDUMP_MEM_SIZE 0
-#define QLA82XX_MAX_ENTRY_HDR 4
-
-struct qla82xx_minidump {
- uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
- uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
- uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
- uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
- uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
- uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
-};
-
#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
#define RQST_TMPLT_SIZE 0x0
#define RQST_TMPLT 0x1
@@ -982,6 +1014,16 @@ struct qla82xx_minidump {
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
+
+#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
+
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
0x410000AC, 0x410000B8, 0x410000BC };
#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 9da426628b97..ad2da9cd7000 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -18,6 +18,7 @@
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
+#include "ql4_83xx.h"
/*
* Driver version
@@ -160,7 +161,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
-static umode_t ql4_attr_is_visible(int param_type, int param);
+static umode_t qla4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
int reason);
@@ -203,7 +204,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
CAP_DATADGST | CAP_LOGIN_OFFLOAD |
CAP_MULTI_R2T,
- .attr_is_visible = ql4_attr_is_visible,
+ .attr_is_visible = qla4_attr_is_visible,
.create_session = qla4xxx_session_create,
.destroy_session = qla4xxx_session_destroy,
.start_conn = qla4xxx_conn_start,
@@ -315,7 +316,7 @@ exit_send_ping:
return rval;
}
-static umode_t ql4_attr_is_visible(int param_type, int param)
+static umode_t qla4_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
@@ -1366,7 +1367,7 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
- dst_addr = &qla_conn->qla_ep->dst_addr;
+ dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -2315,8 +2316,17 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
if (ha->nx_pcibase)
iounmap(
(struct device_reg_82xx __iomem *)ha->nx_pcibase);
- } else if (ha->reg)
+ } else if (is_qla8032(ha)) {
+ if (ha->nx_pcibase)
+ iounmap(
+ (struct device_reg_83xx __iomem *)ha->nx_pcibase);
+ } else if (ha->reg) {
iounmap(ha->reg);
+ }
+
+ if (ha->reset_tmplt.buff)
+ vfree(ha->reset_tmplt.buff);
+
pci_release_regions(ha->pdev);
}
@@ -2420,7 +2430,7 @@ static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
uint32_t temp, temp_state, temp_val;
int status = QLA_SUCCESS;
- temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+ temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
temp_state = qla82xx_get_temp_state(temp);
temp_val = qla82xx_get_temp_val(temp);
@@ -2456,7 +2466,8 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
uint32_t fw_heartbeat_counter;
int status = QLA_SUCCESS;
- fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_PEG_ALIVE_COUNTER);
/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
if (fw_heartbeat_counter == 0xffffffff) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
@@ -2470,28 +2481,7 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
/* FW not alive after 2 seconds */
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
-
- ql4_printk(KERN_INFO, ha,
- "scsi(%ld): %s, Dumping hw/fw registers:\n "
- " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
- " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
- " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
- " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
- ha->host_no, __func__,
- qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1),
- qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS2),
- qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
- 0x3c),
- qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
- 0x3c),
- qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
- 0x3c),
- qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
- 0x3c),
- qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
- 0x3c));
+ qla4_8xxx_dump_peg_reg(ha);
status = QLA_ERROR;
}
} else
@@ -2501,6 +2491,48 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
return status;
}
+static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
+{
+ uint32_t halt_status;
+ int halt_status_unrecoverable = 0;
+
+ halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+
+ if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
+ ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
+ __func__);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE)
+ halt_status_unrecoverable = 1;
+ } else if (is_qla8032(ha)) {
+ if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
+ ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
+ __func__);
+ else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
+ halt_status_unrecoverable = 1;
+ }
+
+ /*
+ * Since we cannot change dev_state in interrupt context,
+ * set the appropriate DPC flag, then wake up the DPC
+ */
+ if (halt_status_unrecoverable) {
+ set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
+ __func__);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ qla4xxx_mailbox_premature_completion(ha);
+ qla4xxx_wake_dpc(ha);
+}
+
/**
* qla4_8xxx_watchdog - Poll dev state
* @ha: Pointer to host adapter structure.
@@ -2509,31 +2541,33 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
**/
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
- uint32_t dev_state, halt_status;
+ uint32_t dev_state;
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
if (qla4_8xxx_check_temp(ha)) {
- ql4_printk(KERN_INFO, ha, "disabling pause"
- " transmit on port 0 & 1.\n");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
- CRB_NIU_XG_PAUSE_CTL_P0 |
- CRB_NIU_XG_PAUSE_CTL_P1);
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
- } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
- !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
- if (!ql4xdontresethba) {
+ } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+ !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ if (is_qla8032(ha) ||
+ (is_qla8022(ha) && !ql4xdontresethba)) {
ql4_printk(KERN_INFO, ha, "%s: HW State: "
"NEED RESET!\n", __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
}
- } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
+ } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
!test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
__func__);
@@ -2541,36 +2575,8 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
qla4xxx_wake_dpc(ha);
} else {
/* Check firmware health */
- if (qla4_8xxx_check_fw_alive(ha)) {
- ql4_printk(KERN_INFO, ha, "disabling pause"
- " transmit on port 0 & 1.\n");
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
- CRB_NIU_XG_PAUSE_CTL_P0 |
- CRB_NIU_XG_PAUSE_CTL_P1);
- halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
-
- if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
- ql4_printk(KERN_ERR, ha, "%s:"
- " Firmware aborted with"
- " error code 0x00006700."
- " Device is being reset\n",
- __func__);
-
- /* Since we cannot change dev_state in interrupt
- * context, set appropriate DPC flag then wakeup
- * DPC */
- if (halt_status & HALT_STATUS_UNRECOVERABLE)
- set_bit(DPC_HA_UNRECOVERABLE,
- &ha->dpc_flags);
- else {
- ql4_printk(KERN_INFO, ha, "%s: detect "
- "abort needed!\n", __func__);
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
- }
- qla4xxx_mailbox_premature_completion(ha);
- qla4xxx_wake_dpc(ha);
- }
+ if (qla4_8xxx_check_fw_alive(ha))
+ qla4_8xxx_process_fw_error(ha);
}
}
}
@@ -2652,11 +2658,10 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- if (is_qla8022(ha)) {
+ if (is_qla80XX(ha))
qla4_8xxx_watchdog(ha);
- }
- if (!is_qla8022(ha)) {
+ if (is_qla40XX(ha)) {
/* Check for heartbeat interval. */
if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
ha->heartbeat_interval != 0) {
@@ -2941,6 +2946,14 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+ if (is_qla8032(ha) &&
+ !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ /* disable pause frame for ISP83xx */
+ qla4_83xx_disable_pause(ha);
+ }
+
iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
@@ -2953,9 +2966,9 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
goto recover_ha_init_adapter;
}
- /* For the ISP-82xx adapter, issue a stop_firmware if invoked
+ /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
* from eh_host_reset or ioctl module */
- if (is_qla8022(ha) && !reset_chip &&
+ if (is_qla80XX(ha) && !reset_chip &&
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2978,13 +2991,13 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
}
/* Issue full chip reset if recovering from a catastrophic error,
- * or if stop_firmware fails for ISP-82xx.
+ * or if stop_firmware fails for ISP-8xxx.
* This is the default case for ISP-4xxx */
- if (!is_qla8022(ha) || reset_chip) {
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha) || reset_chip) {
+ if (is_qla40XX(ha))
goto chip_reset;
- /* Check if 82XX firmware is alive or not
+ /* Check if 8XXX firmware is alive or not
* We may have arrived here from NEED_RESET
* detection only */
if (test_bit(AF_FW_RECOVERY, &ha->flags))
@@ -3000,10 +3013,10 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);
}
-
+chip_reset:
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
-chip_reset:
+
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -3021,7 +3034,7 @@ recover_ha_init_adapter:
/* For ISP-4xxx, force function 1 to always initialize
* before function 3 to prevent both functions from
* stepping on top of the other */
- if (!is_qla8022(ha) && (ha->mac_index == 3))
+ if (is_qla40XX(ha) && (ha->mac_index == 3))
ssleep(6);
/* NOTE: AF_ONLINE flag set upon successful completion of
@@ -3039,11 +3052,12 @@ recover_ha_init_adapter:
* Since we don't want to block the DPC for too long
* with multiple resets in the same thread,
* utilize DPC to retry */
- if (is_qla8022(ha)) {
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla4_8xxx_idc_unlock(ha);
- if (dev_state == QLA82XX_DEV_FAILED) {
+ if (is_qla80XX(ha)) {
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DEV_STATE);
+ ha->isp_ops->idc_unlock(ha);
+ if (dev_state == QLA8XXX_DEV_FAILED) {
ql4_printk(KERN_INFO, ha, "%s: don't retry "
"recover adapter. H/W is in Failed "
"state\n", __func__);
@@ -3168,6 +3182,7 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
+ int status = QLA_SUCCESS;
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
@@ -3175,11 +3190,20 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" unblock user space session\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
- return QLA_SUCCESS;
+ if (!iscsi_is_session_online(cls_session)) {
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+ } else {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
+ ha->host_no, __func__, ddb_entry->fw_ddb_index,
+ cls_session->sid);
+ status = QLA_ERROR;
+ }
+
+ return status;
}
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
@@ -3373,15 +3397,26 @@ static void qla4xxx_do_dpc(struct work_struct *work)
/* post events to application */
qla4xxx_do_work(ha);
- if (is_qla8022(ha)) {
+ if (is_qla80XX(ha)) {
if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
- qla4_8xxx_idc_lock(ha);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
- qla4_8xxx_idc_unlock(ha);
+ if (is_qla8032(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ /* disable pause frame for ISP83xx */
+ qla4_83xx_disable_pause(ha);
+ }
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
qla4_8xxx_device_state_handler(ha);
}
+
+ if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags))
+ qla4_83xx_post_idc_ack(ha);
+
if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
qla4_8xxx_need_qsnt_handler(ha);
}
@@ -3391,7 +3426,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
(test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
- if (ql4xdontresethba) {
+ if ((is_qla8022(ha) && ql4xdontresethba) ||
+ (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -3477,6 +3513,18 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
ha->isp_ops->disable_intrs(ha);
}
+ if (is_qla40XX(ha)) {
+ writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ } else if (is_qla8022(ha)) {
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ readl(&ha->qla4_82xx_reg->host_int);
+ } else if (is_qla8032(ha)) {
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ readl(&ha->qla4_83xx_reg->risc_intr);
+ }
+
/* Remove timer thread, if present */
if (ha->timer_active)
qla4xxx_stop_timer(ha);
@@ -3492,10 +3540,10 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
/* Put firmware in known state */
ha->isp_ops->reset_firmware(ha);
- if (is_qla8022(ha)) {
- qla4_8xxx_idc_lock(ha);
+ if (is_qla80XX(ha)) {
+ ha->isp_ops->idc_lock(ha);
qla4_8xxx_clear_drv_active(ha);
- qla4_8xxx_idc_unlock(ha);
+ ha->isp_ops->idc_unlock(ha);
}
/* Detach interrupts */
@@ -3542,16 +3590,20 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
/* Mapping of IO base pointer, door bell read and write pointer */
/* mapping of IO base pointer */
- ha->qla4_8xxx_reg =
- (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
- 0xbc000 + (ha->pdev->devfn << 11));
+ if (is_qla8022(ha)) {
+ ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
+ ((uint8_t *)ha->nx_pcibase + 0xbc000 +
+ (ha->pdev->devfn << 11));
+ ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
+ QLA82XX_CAM_RAM_DB2);
+ } else if (is_qla8032(ha)) {
+ ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
+ ((uint8_t *)ha->nx_pcibase);
+ }
db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
db_len = pci_resource_len(pdev, 4);
- ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
- QLA82XX_CAM_RAM_DB2);
-
return 0;
iospace_error_exit:
return -ENOMEM;
@@ -3639,23 +3691,64 @@ static struct isp_operations qla4xxx_isp_ops = {
.rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
.rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
.get_sys_info = qla4xxx_get_sys_info,
+ .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
+ .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
};
-static struct isp_operations qla4_8xxx_isp_ops = {
+static struct isp_operations qla4_82xx_isp_ops = {
.iospace_config = qla4_8xxx_iospace_config,
.pci_config = qla4_8xxx_pci_config,
- .disable_intrs = qla4_8xxx_disable_intrs,
- .enable_intrs = qla4_8xxx_enable_intrs,
+ .disable_intrs = qla4_82xx_disable_intrs,
+ .enable_intrs = qla4_82xx_enable_intrs,
.start_firmware = qla4_8xxx_load_risc,
- .intr_handler = qla4_8xxx_intr_handler,
- .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
- .reset_chip = qla4_8xxx_isp_reset,
+ .restart_firmware = qla4_82xx_try_start_fw,
+ .intr_handler = qla4_82xx_intr_handler,
+ .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
+ .need_reset = qla4_8xxx_need_reset,
+ .reset_chip = qla4_82xx_isp_reset,
.reset_firmware = qla4_8xxx_stop_firmware,
- .queue_iocb = qla4_8xxx_queue_iocb,
- .complete_iocb = qla4_8xxx_complete_iocb,
- .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
- .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
+ .queue_iocb = qla4_82xx_queue_iocb,
+ .complete_iocb = qla4_82xx_complete_iocb,
+ .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
+ .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
.get_sys_info = qla4_8xxx_get_sys_info,
+ .rd_reg_direct = qla4_82xx_rd_32,
+ .wr_reg_direct = qla4_82xx_wr_32,
+ .rd_reg_indirect = qla4_82xx_md_rd_32,
+ .wr_reg_indirect = qla4_82xx_md_wr_32,
+ .idc_lock = qla4_82xx_idc_lock,
+ .idc_unlock = qla4_82xx_idc_unlock,
+ .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
+ .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
+ .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
+};
+
+static struct isp_operations qla4_83xx_isp_ops = {
+ .iospace_config = qla4_8xxx_iospace_config,
+ .pci_config = qla4_8xxx_pci_config,
+ .disable_intrs = qla4_83xx_disable_intrs,
+ .enable_intrs = qla4_83xx_enable_intrs,
+ .start_firmware = qla4_8xxx_load_risc,
+ .restart_firmware = qla4_83xx_start_firmware,
+ .intr_handler = qla4_83xx_intr_handler,
+ .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
+ .need_reset = qla4_8xxx_need_reset,
+ .reset_chip = qla4_83xx_isp_reset,
+ .reset_firmware = qla4_8xxx_stop_firmware,
+ .queue_iocb = qla4_83xx_queue_iocb,
+ .complete_iocb = qla4_83xx_complete_iocb,
+ .rd_shdw_req_q_out = qla4_83xx_rd_shdw_req_q_out,
+ .rd_shdw_rsp_q_in = qla4_83xx_rd_shdw_rsp_q_in,
+ .get_sys_info = qla4_8xxx_get_sys_info,
+ .rd_reg_direct = qla4_83xx_rd_reg,
+ .wr_reg_direct = qla4_83xx_wr_reg,
+ .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
+ .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
+ .idc_lock = qla4_83xx_drv_lock,
+ .idc_unlock = qla4_83xx_drv_unlock,
+ .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
+ .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
+ .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
};
uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
@@ -3663,9 +3756,14 @@ uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
}
-uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
{
- return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
+ return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
+}
+
+uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+{
+ return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out));
}
uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
@@ -3673,9 +3771,14 @@ uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
}
-uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
- return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
+ return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
+}
+
+uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+{
+ return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in));
}
static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
@@ -5050,30 +5153,36 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->pdev = pdev;
ha->host = host;
ha->host_no = host->host_no;
+ ha->func_num = PCI_FUNC(ha->pdev->devfn);
pci_enable_pcie_error_reporting(pdev);
/* Setup Runtime configurable options */
if (is_qla8022(ha)) {
- ha->isp_ops = &qla4_8xxx_isp_ops;
- rwlock_init(&ha->hw_lock);
+ ha->isp_ops = &qla4_82xx_isp_ops;
+ ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
ha->qdr_sn_window = -1;
ha->ddr_mn_window = -1;
ha->curr_window = 255;
- ha->func_num = PCI_FUNC(ha->pdev->devfn);
nx_legacy_intr = &legacy_intr[ha->func_num];
ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
ha->nx_legacy_intr.tgt_status_reg =
nx_legacy_intr->tgt_status_reg;
ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+ } else if (is_qla8032(ha)) {
+ ha->isp_ops = &qla4_83xx_isp_ops;
+ ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
} else {
ha->isp_ops = &qla4xxx_isp_ops;
}
- /* Set EEH reset type to fundamental if required by hba */
- if (is_qla8022(ha))
+ if (is_qla80XX(ha)) {
+ rwlock_init(&ha->hw_lock);
+ ha->pf_bit = ha->func_num << 16;
+ /* Set EEH reset type to fundamental if required by hba */
pdev->needs_freset = 1;
+ }
/* Configure PCI I/O space. */
ret = ha->isp_ops->iospace_config(ha);
@@ -5094,6 +5203,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
init_completion(&ha->disable_acb_comp);
spin_lock_init(&ha->hardware_lock);
+ spin_lock_init(&ha->work_lock);
/* Initialize work list */
INIT_LIST_HEAD(&ha->work_list);
@@ -5128,8 +5238,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ret)
goto probe_failed;
- if (is_qla8022(ha))
- (void) qla4_8xxx_get_flash_info(ha);
+ if (is_qla80XX(ha))
+ qla4_8xxx_get_flash_info(ha);
+
+ if (is_qla8032(ha)) {
+ qla4_83xx_read_reset_template(ha);
+ /*
+ * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+ * If DONTRESET_BIT0 is set, drivers should not set dev_state
+ * to NEED_RESET, but if NEED_RESET is set, drivers should
+ * honor the reset.
+ */
+ if (ql4xdontresethba == 1)
+ qla4_83xx_set_idc_dontreset(ha);
+ }
/*
* Initialize the Host adapter request/response queues and
@@ -5137,14 +5259,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* NOTE: interrupts enabled upon successful completion
*/
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+
+ /* Don't retry adapter initialization if IRQ allocation failed */
+ if (!test_bit(AF_IRQ_ATTACHED, &ha->flags))
+ goto skip_retry_init;
+
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
- if (is_qla8022(ha)) {
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla4_8xxx_idc_unlock(ha);
- if (dev_state == QLA82XX_DEV_FAILED) {
+ if (is_qla80XX(ha)) {
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DEV_STATE);
+ ha->isp_ops->idc_unlock(ha);
+ if (dev_state == QLA8XXX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "%s: don't retry "
"initialize adapter. H/W is in failed state\n",
__func__);
@@ -5160,16 +5288,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
+skip_retry_init:
if (!test_bit(AF_ONLINE, &ha->flags)) {
ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
- if (is_qla8022(ha) && ql4xdontresethba) {
+ if ((is_qla8022(ha) && ql4xdontresethba) ||
+ (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
/* Put the device in failed state. */
DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
- qla4_8xxx_idc_lock(ha);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
- qla4_8xxx_idc_unlock(ha);
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
}
ret = -ENODEV;
goto remove_host;
@@ -5195,12 +5325,13 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
goto remove_host;
}
- /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
+ /*
+ * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
* (which is called indirectly by qla4xxx_initialize_adapter),
* so that irqs will be registered after crbinit but before
* mbx_intr_enable.
*/
- if (!is_qla8022(ha)) {
+ if (is_qla40XX(ha)) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to reserve "
@@ -5226,6 +5357,10 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
+ /* Set the driver version */
+ if (is_qla80XX(ha))
+ qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
+
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha,
"%s: No iSCSI boot target configured\n", __func__);
@@ -5333,9 +5468,16 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
{
struct scsi_qla_host *ha;
+ /*
+ * If the PCI device is disabled then probe_adapter failed and
+ * the resources were already cleaned up on its exit path.
+ */
+ if (!pci_is_enabled(pdev))
+ return;
+
ha = pci_get_drvdata(pdev);
- if (!is_qla8022(ha))
+ if (is_qla40XX(ha))
qla4xxx_prevent_other_port_reinit(ha);
/* destroy iface from sysfs */
@@ -5755,7 +5897,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
ha = to_qla_host(cmd->device->host);
- if (ql4xdontresethba) {
+ if (is_qla8032(ha) && ql4xdontresethba)
+ qla4_83xx_set_idc_dontreset(ha);
+
+ /*
+ * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other
+ * protocol drivers, we should not set device_state to
+ * NEED_RESET
+ */
+ if (ql4xdontresethba ||
+ (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
@@ -5779,7 +5930,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
}
if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
else
set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -5874,7 +6025,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
break;
case SCSI_FIRMWARE_RESET:
if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
/* set firmware context reset */
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
@@ -6013,32 +6164,43 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
"0x%x is the owner\n", ha->host_no, __func__,
ha->pdev->devfn);
- qla4_8xxx_idc_lock(ha);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_COLD);
-
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
- QLA82XX_IDC_VERSION);
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_COLD);
+ ha->isp_ops->idc_unlock(ha);
+
+ rval = qla4_8xxx_update_idc_reg(ha);
+ if (rval == QLA_ERROR) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
+ ha->host_no, __func__);
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
+ goto exit_error_recovery;
+ }
- qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
- qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"FAILED\n", ha->host_no, __func__);
+ ha->isp_ops->idc_lock(ha);
qla4_8xxx_clear_drv_active(ha);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_FAILED);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"READY\n", ha->host_no, __func__);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_READY);
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_READY);
/* Clear driver state register */
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
+ ha->isp_ops->idc_unlock(ha);
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to "
@@ -6050,13 +6212,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
rval = QLA_SUCCESS;
}
}
- qla4_8xxx_idc_unlock(ha);
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
"the reset owner\n", ha->host_no, __func__,
ha->pdev->devfn);
- if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
- QLA82XX_DEV_READY)) {
+ if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
+ QLA8XXX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
@@ -6071,11 +6232,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
rval = QLA_SUCCESS;
}
}
- qla4_8xxx_idc_lock(ha);
+ ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
- qla4_8xxx_idc_unlock(ha);
+ ha->isp_ops->idc_unlock(ha);
}
}
+exit_error_recovery:
clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
return rval;
}
@@ -6114,7 +6276,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
ha->isp_ops->disable_intrs(ha);
- if (is_qla8022(ha)) {
+ if (is_qla80XX(ha)) {
if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
ret = PCI_ERS_RESULT_RECOVERED;
goto exit_slot_reset;
@@ -6180,6 +6342,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
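Most of the ql4_os.c churn above follows one pattern: chip-specific calls (qla4_8xxx_rd_32, qla4_8xxx_idc_lock, ...) are replaced by dispatch through the per-chip isp_operations table chosen at probe time, so the generic is_qla80XX() paths serve both ISP82xx and the newly probed ISP8324. A reduced sketch of that dispatch, using hypothetical names in place of the driver's types:

#include <stdio.h>

struct host;

/* Hypothetical miniature of the isp_operations vtable. */
struct isp_ops {
        void (*idc_lock)(struct host *ha);
        void (*idc_unlock)(struct host *ha);
        unsigned int (*rd_reg_direct)(struct host *ha, unsigned int off);
};

struct host {
        const struct isp_ops *isp_ops;
};

static void lock_82xx(struct host *ha)   { puts("82xx idc lock"); }
static void unlock_82xx(struct host *ha) { puts("82xx idc unlock"); }
static unsigned int rd_82xx(struct host *ha, unsigned int off) { return 0; }

static const struct isp_ops ops_82xx = {
        .idc_lock      = lock_82xx,
        .idc_unlock    = unlock_82xx,
        .rd_reg_direct = rd_82xx,
};

/* Generic code never re-tests the chip type; it just calls through
 * the table, like the ha->isp_ops->idc_lock()/idc_unlock() pairs and
 * qla4_8xxx_rd_direct() calls in the hunks above. */
static unsigned int read_dev_state(struct host *ha, unsigned int off)
{
        unsigned int state;

        ha->isp_ops->idc_lock(ha);
        state = ha->isp_ops->rd_reg_direct(ha, off);
        ha->isp_ops->idc_unlock(ha);
        return state;
}

int main(void)
{
        struct host ha = { .isp_ops = &ops_82xx };

        return (int)read_dev_state(&ha, 0x42);
}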
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 725034f4252c..f6df2ea91ab5 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2010 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k18"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 182d5a57ab74..57fbd5a3d4e2 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -109,6 +109,7 @@ static const char * scsi_debug_version_date = "20100324";
#define DEF_OPT_BLKS 64
#define DEF_PHYSBLK_EXP 0
#define DEF_PTYPE 0
+#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
@@ -193,11 +194,11 @@ static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
+static bool scsi_debug_removable = DEF_REMOVABLE;
static int scsi_debug_cmnd_count = 0;
#define DEV_READONLY(TGT) (0)
-#define DEV_REMOVEABLE(TGT) (0)
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -919,7 +920,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
return ret;
}
/* drops through here for a standard inquiry */
- arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
+ arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
arr[2] = scsi_debug_scsi_level;
arr[3] = 2; /* response_data_format==2 */
arr[4] = SDEBUG_LONG_INQ_SZ - 5;
@@ -1211,7 +1212,7 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target)
p[11] = sdebug_sectors_per & 0xff;
p[12] = (scsi_debug_sector_size >> 8) & 0xff;
p[13] = scsi_debug_sector_size & 0xff;
- if (DEV_REMOVEABLE(target))
+ if (scsi_debug_removable)
p[20] |= 0x20; /* should agree with INQUIRY */
if (1 == pcontrol)
memset(p + 2, 0, sizeof(format_pg) - 2);
@@ -2754,6 +2755,7 @@ module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
@@ -2796,6 +2798,7 @@ MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
+MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
@@ -3205,6 +3208,25 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
}
DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
+static ssize_t sdebug_removable_show(struct device_driver *ddp,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
+}
+static ssize_t sdebug_removable_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_removable = (n > 0);
+ return count;
+ }
+ return -EINVAL;
+}
+DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
+ sdebug_removable_store);
+
/* Note: The following function creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -3230,6 +3252,7 @@ static int do_create_driverfs_files(void)
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
@@ -3255,6 +3278,7 @@ static void do_remove_driverfs_files(void)
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
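The new scsi_debug knob is exposed twice: as a removable= module parameter and as a writable removable attribute under /sys/bus/pseudo/drivers/scsi_debug/. Either way it is set, the flag surfaces as the RMB bit (byte 1, bit 7) of the standard INQUIRY data and as bit 5 of byte 20 in the format page. A sketch of the INQUIRY side, assuming a caller-supplied response buffer:

#include <stdint.h>
#include <string.h>

/* Build the first bytes of a standard INQUIRY response; mirrors
 * "arr[1] = scsi_debug_removable ? 0x80 : 0" in the hunk above. */
static void fill_std_inquiry(uint8_t *arr, size_t len, int removable)
{
        memset(arr, 0, len);
        arr[0] = 0x00;                    /* peripheral type: disk */
        arr[1] = removable ? 0x80 : 0x00; /* RMB: removable medium */
        arr[2] = 0x05;                    /* SCSI level: SPC-3 */
        arr[3] = 0x02;                    /* response data format */
}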
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index cf8dfab9489f..43fca9170bf2 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -172,6 +172,7 @@ static struct {
{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
{"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index faa790fba134..da36a3a81a9e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2473,7 +2473,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
* Try to transition the scsi device to SDEV_RUNNING or one of the
* offlined states and goose the device queue if successful.
*/
- if (sdev->sdev_state == SDEV_BLOCK)
+ if ((sdev->sdev_state == SDEV_BLOCK) ||
+ (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
sdev->sdev_state = new_state;
else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
if (new_state == SDEV_TRANSPORT_OFFLINE ||
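The one-line widening above lets scsi_internal_device_unblock() move a device out of SDEV_TRANSPORT_OFFLINE the same way it moves one out of SDEV_BLOCK, which matters now that transports can park devices offline and later revive them. The transition check in isolation, with a pared-down state enum standing in for the real one:

/* Pared-down sketch of the unblock transition test. */
enum sdev_state {
        SDEV_RUNNING,
        SDEV_BLOCK,
        SDEV_CREATED_BLOCK,
        SDEV_TRANSPORT_OFFLINE,
};

static int may_unblock_directly(enum sdev_state cur)
{
        /* Before this patch only SDEV_BLOCK qualified here; the
         * SDEV_CREATED_BLOCK case still takes its own branch. */
        return cur == SDEV_BLOCK || cur == SDEV_TRANSPORT_OFFLINE;
}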
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index d947ffc20ceb..3e58b2245f1f 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -921,6 +921,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_RETRY_HWERROR)
sdev->retry_hwerror = 1;
+ if (*bflags & BLIST_NO_DIF)
+ sdev->no_dif = 1;
+
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure) {
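BLIST_NO_DIF travels from the static blacklist to the device: the scsi_devinfo.c entry above tags the HITACHI HUS1530, and scsi_add_lun() now latches the flag into sdev->no_dif so the DIF machinery (see the sd.c and sd_dif.c hunks below) is bypassed for that hardware. The hand-off in miniature; the flag's bit value is an assumption of the sketch:

#include <stdint.h>

#define BLIST_NO_DIF (1U << 30)   /* bit value assumed for the sketch */

struct toy_sdev {
        unsigned int no_dif : 1;
};

/* Latch a blacklist flag into the scanned device, mirroring
 * "if (*bflags & BLIST_NO_DIF) sdev->no_dif = 1;" above. */
static void apply_blacklist(struct toy_sdev *sdev, uint32_t bflags)
{
        if (bflags & BLIST_NO_DIF)
                sdev->no_dif = 1;
}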
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 093d4f6a54d2..ce5224c92eda 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1031,33 +1031,31 @@ static void __scsi_remove_target(struct scsi_target *starget)
void scsi_remove_target(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev->parent);
- struct scsi_target *starget, *found;
+ struct scsi_target *starget, *last = NULL;
unsigned long flags;
- restart:
- found = NULL;
+ /* remove targets, being careful to look up the next entry
+ * before deleting the current one
+ */
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
if (starget->state == STARGET_DEL)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
- found = starget;
- found->reap_ref++;
- break;
+ /* assuming new targets arrive at the end */
+ starget->reap_ref++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (last)
+ scsi_target_reap(last);
+ last = starget;
+ __scsi_remove_target(starget);
+ spin_lock_irqsave(shost->host_lock, flags);
}
}
spin_unlock_irqrestore(shost->host_lock, flags);
- if (found) {
- __scsi_remove_target(found);
- scsi_target_reap(found);
- /* in the case where @dev has multiple starget children,
- * continue removing.
- *
- * FIXME: does such a case exist?
- */
- goto restart;
- }
+ if (last)
+ scsi_target_reap(last);
}
EXPORT_SYMBOL(scsi_remove_target);
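The rewritten loop above removes targets in a single pass instead of restarting: it pins the current target with a reap reference, drops the host lock, reaps the previously pinned target (safe, because the list cursor is kept alive by the new pin), removes the current one, and retakes the lock to advance. The same pin-current/reap-previous walk in miniature, with remove() and reap() as hypothetical stand-ins for __scsi_remove_target() and scsi_target_reap():

#include <pthread.h>
#include <stddef.h>

struct node {
        struct node *next;
        int reap_ref;
};

static void walk_remove_all(struct node *head, pthread_mutex_t *lock,
                            void (*remove)(struct node *),
                            void (*reap)(struct node *))
{
        struct node *cur, *last = NULL;

        pthread_mutex_lock(lock);
        for (cur = head; cur; cur = cur->next) {
                cur->reap_ref++;              /* pin: cur stays linked */
                pthread_mutex_unlock(lock);
                if (last)
                        reap(last);           /* may unlink last; cur is
                                               * pinned, so cur->next
                                               * stays valid */
                last = cur;
                remove(cur);
                pthread_mutex_lock(lock);
        }
        pthread_mutex_unlock(lock);
        if (last)
                reap(last);                   /* drop the final pin */
}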
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4df73e52a4f9..12f6fdfc1147 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -262,6 +262,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+sd_store_protection_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ unsigned int val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ err = kstrtouint(buf, 10, &val);
+
+ if (err)
+ return err;
+
+ if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
+ sdkp->protection_type = val;
+
+ return count;
+}
+
+static ssize_t
sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -381,7 +403,8 @@ static struct device_attribute sd_disk_attrs[] = {
sd_store_allow_restart),
__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
sd_store_manage_start_stop),
- __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+ __ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type,
+ sd_store_protection_type),
__ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
@@ -804,9 +827,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[0] = WRITE_6;
SCpnt->sc_data_direction = DMA_TO_DEVICE;
- if (blk_integrity_rq(rq) &&
- sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
- goto out;
+ if (blk_integrity_rq(rq))
+ sd_dif_prepare(rq, block, sdp->sector_size);
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
@@ -1671,34 +1693,42 @@ sd_spinup_disk(struct scsi_disk *sdkp)
/*
* Determine whether disk supports Data Integrity Field.
*/
-static void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
+static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
struct scsi_device *sdp = sdkp->device;
u8 type;
+ int ret = 0;
if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
- return;
+ return ret;
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
- if (type == sdkp->protection_type || !sdkp->first_scan)
- return;
+ if (type > SD_DIF_TYPE3_PROTECTION)
+ ret = -ENODEV;
+ else if (scsi_host_dif_capable(sdp->host, type))
+ ret = 1;
+
+ if (sdkp->first_scan || type != sdkp->protection_type)
+ switch (ret) {
+ case -ENODEV:
+ sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
+ " protection type %u. Disabling disk!\n",
+ type);
+ break;
+ case 1:
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIF Type %u protection\n", type);
+ break;
+ case 0:
+ sd_printk(KERN_NOTICE, sdkp,
+ "Disabling DIF Type %u protection\n", type);
+ break;
+ }
sdkp->protection_type = type;
- if (type > SD_DIF_TYPE3_PROTECTION) {
- sd_printk(KERN_ERR, sdkp, "formatted with unsupported " \
- "protection type %u. Disabling disk!\n", type);
- sdkp->capacity = 0;
- return;
- }
-
- if (scsi_host_dif_capable(sdp->host, type))
- sd_printk(KERN_NOTICE, sdkp,
- "Enabling DIF Type %u protection\n", type);
- else
- sd_printk(KERN_NOTICE, sdkp,
- "Disabling DIF Type %u protection\n", type);
+ return ret;
}
static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
@@ -1794,7 +1824,10 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
sector_size = get_unaligned_be32(&buffer[8]);
lba = get_unaligned_be64(&buffer[0]);
- sd_read_protection_type(sdkp, buffer);
+ if (sd_read_protection_type(sdkp, buffer) < 0) {
+ sdkp->capacity = 0;
+ return -ENODEV;
+ }
if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
@@ -2632,7 +2665,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
}
add_disk(gd);
- sd_dif_config_host(sdkp);
+ if (sdkp->capacity)
+ sd_dif_config_host(sdkp);
sd_revalidate_disk(gd);
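sd_read_protection_type() now distinguishes three outcomes instead of silently mutating state: it returns -ENODEV when the medium is formatted with a type beyond DIF Type 3 (read_capacity_16() then zeroes the capacity and fails), 1 when the HBA supports the type, and 0 when DIF stays disabled; sd_probe_async() likewise skips sd_dif_config_host() for a zeroed capacity. The decode itself reads byte 12 of the READ CAPACITY(16) response, where bit 0 is PROT_EN and bits 3:1 carry the zero-based P_TYPE. A sketch of that decode:

#include <stdint.h>

#define SD_DIF_TYPE3_PROTECTION 3

/* Decode the protection fields of a READ CAPACITY(16) payload.
 * Returns 0 if protection is disabled on the medium, otherwise the
 * DIF type (P_TYPE is zero-based, so Type = P_TYPE + 1), or -1 for
 * a type this stack cannot handle -- mirroring the -ENODEV path. */
static int decode_prot_type(const uint8_t *buffer)
{
        uint8_t type;

        if ((buffer[12] & 1) == 0)      /* PROT_EN clear: no DIF */
                return 0;

        type = ((buffer[12] >> 1) & 7) + 1;
        if (type > SD_DIF_TYPE3_PROTECTION)
                return -1;
        return type;
}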
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index f703f4827b6f..47c52a6d733c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -156,7 +156,7 @@ struct sd_dif_tuple {
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_config_host(struct scsi_disk *);
-extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
+extern void sd_dif_prepare(struct request *rq, sector_t, unsigned int);
extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
#else /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index e52d5bc42bc4..04998f36e507 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -366,7 +366,8 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
*
* Type 3 does not have a reference tag so no remapping is required.
*/
-int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
+void sd_dif_prepare(struct request *rq, sector_t hw_sector,
+ unsigned int sector_sz)
{
const int tuple_sz = sizeof(struct sd_dif_tuple);
struct bio *bio;
@@ -378,7 +379,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
sdkp = rq->bio->bi_bdev->bd_disk->private_data;
if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
- return 0;
+ return;
phys = hw_sector & 0xffffffff;
@@ -397,10 +398,9 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
- if (be32_to_cpu(sdt->ref_tag) != virt)
- goto error;
+ if (be32_to_cpu(sdt->ref_tag) == virt)
+ sdt->ref_tag = cpu_to_be32(phys);
- sdt->ref_tag = cpu_to_be32(phys);
virt++;
phys++;
}
@@ -410,16 +410,6 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
}
-
- return 0;
-
-error:
- kunmap_atomic(sdt);
- sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
- __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
- be16_to_cpu(sdt->app_tag));
-
- return -EILSEQ;
}
/*
@@ -463,10 +453,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
return;
}
- if (be32_to_cpu(sdt->ref_tag) != phys &&
- sdt->app_tag != 0xffff)
- sdt->ref_tag = 0xffffffff; /* Bad ref */
- else
+ if (be32_to_cpu(sdt->ref_tag) == phys)
sdt->ref_tag = cpu_to_be32(virt);
virt++;
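With the error path gone, sd_dif_prepare() and sd_dif_complete() now remap a tuple's reference tag only when it already holds the expected value and pass anything else through untouched, leaving mismatches for the device (on write) or the completion path (on read) to flag. The remap loop in isolation; the struct layout and the use of htonl()/ntohl() as big-endian helpers are assumptions of the sketch:

#include <stdint.h>
#include <arpa/inet.h>  /* htonl/ntohl as be32 helpers for the sketch */

struct dif_tuple {        /* field order modeled on sd_dif_tuple */
        uint16_t guard_tag;
        uint16_t app_tag;
        uint32_t ref_tag; /* big-endian low 32 bits of the target LBA */
};

/* Remap virtual ref tags to physical ones across a run of tuples,
 * skipping (rather than rejecting) tuples that don't carry the
 * expected virtual tag -- the behavioral change made above. */
static void remap_ref_tags(struct dif_tuple *sdt, int count,
                           uint32_t virt, uint32_t phys)
{
        int i;

        for (i = 0; i < count; i++, sdt++, virt++, phys++) {
                if (ntohl(sdt->ref_tag) == virt)
                        sdt->ref_tag = htonl(phys);
        }
}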
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e41998cb098e..98156a97c472 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -37,6 +37,7 @@ static const char *verstr = "20101219";
#include <linux/blkdev.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -74,17 +75,14 @@ static const char *verstr = "20101219";
#include "st_options.h"
#include "st.h"
-static DEFINE_MUTEX(st_mutex);
static int buffer_kbs;
static int max_sg_segs;
static int try_direct_io = TRY_DIRECT_IO;
static int try_rdio = 1;
static int try_wdio = 1;
-static int st_dev_max;
-static int st_nr_dev;
-
-static struct class *st_sysfs_class;
+static struct class st_sysfs_class;
+static struct device_attribute st_dev_attrs[];
MODULE_AUTHOR("Kai Makisara");
MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -173,13 +171,9 @@ static int debugging = DEBUG;
24 bits) */
#define SET_DENS_AND_BLK 0x10001
-static DEFINE_RWLOCK(st_dev_arr_lock);
-
static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE;
static int st_max_sg_segs = ST_MAX_SG;
-static struct scsi_tape **scsi_tapes = NULL;
-
static int modes_defined;
static int enlarge_buffer(struct st_buffer *, int, int);
@@ -198,7 +192,6 @@ static int st_remove(struct device *);
static int do_create_sysfs_files(void);
static void do_remove_sysfs_files(void);
-static int do_create_class_files(struct scsi_tape *, int, int);
static struct scsi_driver st_template = {
.owner = THIS_MODULE,
@@ -221,6 +214,10 @@ static void scsi_tape_release(struct kref *);
#define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
static DEFINE_MUTEX(st_ref_mutex);
+static DEFINE_SPINLOCK(st_index_lock);
+static DEFINE_SPINLOCK(st_use_lock);
+static DEFINE_IDR(st_index_idr);
+
#include "osst_detect.h"
@@ -238,10 +235,9 @@ static struct scsi_tape *scsi_tape_get(int dev)
struct scsi_tape *STp = NULL;
mutex_lock(&st_ref_mutex);
- write_lock(&st_dev_arr_lock);
+ spin_lock(&st_index_lock);
- if (dev < st_dev_max && scsi_tapes != NULL)
- STp = scsi_tapes[dev];
+ STp = idr_find(&st_index_idr, dev);
if (!STp) goto out;
kref_get(&STp->kref);
@@ -258,7 +254,7 @@ out_put:
kref_put(&STp->kref, scsi_tape_release);
STp = NULL;
out:
- write_unlock(&st_dev_arr_lock);
+ spin_unlock(&st_index_lock);
mutex_unlock(&st_ref_mutex);
return STp;
}
@@ -1188,7 +1184,6 @@ static int st_open(struct inode *inode, struct file *filp)
int dev = TAPE_NR(inode);
char *name;
- mutex_lock(&st_mutex);
/*
* We really want to do nonseekable_open(inode, filp); here, but some
* versions of tar incorrectly call lseek on tapes and bail out if that
@@ -1197,24 +1192,22 @@ static int st_open(struct inode *inode, struct file *filp)
filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
if (!(STp = scsi_tape_get(dev))) {
- mutex_unlock(&st_mutex);
return -ENXIO;
}
- write_lock(&st_dev_arr_lock);
filp->private_data = STp;
name = tape_name(STp);
+ spin_lock(&st_use_lock);
if (STp->in_use) {
- write_unlock(&st_dev_arr_lock);
+ spin_unlock(&st_use_lock);
scsi_tape_put(STp);
- mutex_unlock(&st_mutex);
DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
return (-EBUSY);
}
STp->in_use = 1;
- write_unlock(&st_dev_arr_lock);
+ spin_unlock(&st_use_lock);
STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
if (scsi_autopm_get_device(STp->device) < 0) {
@@ -1262,16 +1255,16 @@ static int st_open(struct inode *inode, struct file *filp)
retval = (-EIO);
goto err_out;
}
- mutex_unlock(&st_mutex);
return 0;
err_out:
normalize_buffer(STp->buffer);
+ spin_lock(&st_use_lock);
STp->in_use = 0;
+ spin_unlock(&st_use_lock);
scsi_tape_put(STp);
if (resumed)
scsi_autopm_put_device(STp->device);
- mutex_unlock(&st_mutex);
return retval;
}
@@ -1403,9 +1396,9 @@ static int st_release(struct inode *inode, struct file *filp)
do_door_lock(STp, 0);
normalize_buffer(STp->buffer);
- write_lock(&st_dev_arr_lock);
+ spin_lock(&st_use_lock);
STp->in_use = 0;
- write_unlock(&st_dev_arr_lock);
+ spin_unlock(&st_use_lock);
scsi_autopm_put_device(STp->device);
scsi_tape_put(STp);
@@ -3992,16 +3985,98 @@ static const struct file_operations st_fops =
.llseek = noop_llseek,
};
+static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
+{
+ int i, error;
+ dev_t cdev_devno;
+ struct cdev *cdev;
+ struct device *dev;
+ struct st_modedef *STm = &(tape->modes[mode]);
+ char name[10];
+ int dev_num = tape->index;
+
+ cdev_devno = MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, rew));
+
+ cdev = cdev_alloc();
+ if (!cdev) {
+ pr_err("st%d: out of memory. Device not attached.\n", dev_num);
+ error = -ENOMEM;
+ goto out;
+ }
+ cdev->owner = THIS_MODULE;
+ cdev->ops = &st_fops;
+
+ error = cdev_add(cdev, cdev_devno, 1);
+ if (error) {
+ pr_err("st%d: Can't add %s-rewind mode %d\n", dev_num,
+ rew ? "non" : "auto", mode);
+ pr_err("st%d: Device not attached.\n", dev_num);
+ goto out_free;
+ }
+ STm->cdevs[rew] = cdev;
+
+ i = mode << (4 - ST_NBR_MODE_BITS);
+ snprintf(name, 10, "%s%s%s", rew ? "n" : "",
+ tape->disk->disk_name, st_formats[i]);
+
+ dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
+ cdev_devno, &tape->modes[mode], "%s", name);
+ if (IS_ERR(dev)) {
+ pr_err("st%d: device_create failed\n", dev_num);
+ error = PTR_ERR(dev);
+ goto out_free;
+ }
+
+ STm->devs[rew] = dev;
+
+ return 0;
+out_free:
+ cdev_del(STm->cdevs[rew]);
+ STm->cdevs[rew] = NULL;
+out:
+ return error;
+}
+
+static int create_cdevs(struct scsi_tape *tape)
+{
+ int mode, error;
+ for (mode = 0; mode < ST_NBR_MODES; ++mode) {
+ error = create_one_cdev(tape, mode, 0);
+ if (error)
+ return error;
+ error = create_one_cdev(tape, mode, 1);
+ if (error)
+ return error;
+ }
+
+ return sysfs_create_link(&tape->device->sdev_gendev.kobj,
+ &tape->modes[0].devs[0]->kobj, "tape");
+}
+
+static void remove_cdevs(struct scsi_tape *tape)
+{
+ int mode, rew;
+ sysfs_remove_link(&tape->device->sdev_gendev.kobj, "tape");
+ for (mode = 0; mode < ST_NBR_MODES; mode++) {
+ struct st_modedef *STm = &(tape->modes[mode]);
+ for (rew = 0; rew < 2; rew++) {
+ if (STm->cdevs[rew])
+ cdev_del(STm->cdevs[rew]);
+ if (STm->devs[rew])
+ device_unregister(STm->devs[rew]);
+ }
+ }
+}
+
static int st_probe(struct device *dev)
{
struct scsi_device *SDp = to_scsi_device(dev);
struct gendisk *disk = NULL;
- struct cdev *cdev = NULL;
struct scsi_tape *tpnt = NULL;
struct st_modedef *STm;
struct st_partstat *STps;
struct st_buffer *buffer;
- int i, j, mode, dev_num, error;
+ int i, dev_num, error;
char *stp;
if (SDp->type != TYPE_TAPE)
@@ -4028,58 +4103,16 @@ static int st_probe(struct device *dev)
goto out_buffer_free;
}
- write_lock(&st_dev_arr_lock);
- if (st_nr_dev >= st_dev_max) {
- struct scsi_tape **tmp_da;
- int tmp_dev_max;
-
- tmp_dev_max = max(st_nr_dev * 2, 8);
- if (tmp_dev_max > ST_MAX_TAPES)
- tmp_dev_max = ST_MAX_TAPES;
- if (tmp_dev_max <= st_nr_dev) {
- write_unlock(&st_dev_arr_lock);
- printk(KERN_ERR "st: Too many tape devices (max. %d).\n",
- ST_MAX_TAPES);
- goto out_put_disk;
- }
-
- tmp_da = kzalloc(tmp_dev_max * sizeof(struct scsi_tape *), GFP_ATOMIC);
- if (tmp_da == NULL) {
- write_unlock(&st_dev_arr_lock);
- printk(KERN_ERR "st: Can't extend device array.\n");
- goto out_put_disk;
- }
-
- if (scsi_tapes != NULL) {
- memcpy(tmp_da, scsi_tapes,
- st_dev_max * sizeof(struct scsi_tape *));
- kfree(scsi_tapes);
- }
- scsi_tapes = tmp_da;
-
- st_dev_max = tmp_dev_max;
- }
-
- for (i = 0; i < st_dev_max; i++)
- if (scsi_tapes[i] == NULL)
- break;
- if (i >= st_dev_max)
- panic("scsi_devices corrupt (st)");
-
tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
if (tpnt == NULL) {
- write_unlock(&st_dev_arr_lock);
printk(KERN_ERR "st: Can't allocate device descriptor.\n");
goto out_put_disk;
}
kref_init(&tpnt->kref);
tpnt->disk = disk;
- sprintf(disk->disk_name, "st%d", i);
disk->private_data = &tpnt->driver;
disk->queue = SDp->request_queue;
tpnt->driver = &st_template;
- scsi_tapes[i] = tpnt;
- dev_num = i;
tpnt->device = SDp;
if (SDp->scsi_level <= 2)
@@ -4125,6 +4158,7 @@ static int st_probe(struct device *dev)
STm->default_compression = ST_DONT_TOUCH;
STm->default_blksize = (-1); /* No forced size */
STm->default_density = (-1); /* No forced density */
+ STm->tape = tpnt;
}
for (i = 0; i < ST_NBR_PARTITIONS; i++) {
@@ -4144,38 +4178,34 @@ static int st_probe(struct device *dev)
tpnt->blksize_changed = 0;
mutex_init(&tpnt->lock);
- st_nr_dev++;
- write_unlock(&st_dev_arr_lock);
+ if (!idr_pre_get(&st_index_idr, GFP_KERNEL)) {
+ pr_warn("st: idr expansion failed\n");
+ error = -ENOMEM;
+ goto out_put_disk;
+ }
- for (mode = 0; mode < ST_NBR_MODES; ++mode) {
- STm = &(tpnt->modes[mode]);
- for (j=0; j < 2; j++) {
- cdev = cdev_alloc();
- if (!cdev) {
- printk(KERN_ERR
- "st%d: out of memory. Device not attached.\n",
- dev_num);
- goto out_free_tape;
- }
- cdev->owner = THIS_MODULE;
- cdev->ops = &st_fops;
-
- error = cdev_add(cdev,
- MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, j)),
- 1);
- if (error) {
- printk(KERN_ERR "st%d: Can't add %s-rewind mode %d\n",
- dev_num, j ? "non" : "auto", mode);
- printk(KERN_ERR "st%d: Device not attached.\n", dev_num);
- goto out_free_tape;
- }
- STm->cdevs[j] = cdev;
+ spin_lock(&st_index_lock);
+ error = idr_get_new(&st_index_idr, tpnt, &dev_num);
+ spin_unlock(&st_index_lock);
+ if (error) {
+ pr_warn("st: idr allocation failed: %d\n", error);
+ goto out_put_disk;
+ }
- }
- error = do_create_class_files(tpnt, dev_num, mode);
- if (error)
- goto out_free_tape;
+ if (dev_num > ST_MAX_TAPES) {
+ pr_err("st: Too many tape devices (max. %d).\n", ST_MAX_TAPES);
+ goto out_put_index;
}
+
+ tpnt->index = dev_num;
+ sprintf(disk->disk_name, "st%d", dev_num);
+
+ dev_set_drvdata(dev, tpnt);
+
+
+ error = create_cdevs(tpnt);
+ if (error)
+ goto out_remove_devs;
scsi_autopm_put_device(SDp);
sdev_printk(KERN_NOTICE, SDp,
@@ -4186,28 +4216,12 @@ static int st_probe(struct device *dev)
return 0;
-out_free_tape:
- for (mode=0; mode < ST_NBR_MODES; mode++) {
- STm = &(tpnt->modes[mode]);
- sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
- "tape");
- for (j=0; j < 2; j++) {
- if (STm->cdevs[j]) {
- if (cdev == STm->cdevs[j])
- cdev = NULL;
- device_destroy(st_sysfs_class,
- MKDEV(SCSI_TAPE_MAJOR,
- TAPE_MINOR(i, mode, j)));
- cdev_del(STm->cdevs[j]);
- }
- }
- }
- if (cdev)
- cdev_del(cdev);
- write_lock(&st_dev_arr_lock);
- scsi_tapes[dev_num] = NULL;
- st_nr_dev--;
- write_unlock(&st_dev_arr_lock);
+out_remove_devs:
+ remove_cdevs(tpnt);
+out_put_index:
+ spin_lock(&st_index_lock);
+ idr_remove(&st_index_idr, dev_num);
+ spin_unlock(&st_index_lock);
out_put_disk:
put_disk(disk);
kfree(tpnt);
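The tape index now comes from an IDR instead of the hand-grown pointer array deleted above, which also retires st_dev_arr_lock and the realloc dance. With the IDR API of this era, allocation is a two-step pairing: idr_pre_get() preloads nodes with GFP_KERNEL outside the spinlock, then idr_get_new() hands out the id under it. A kernel-context sketch of that pairing as used in st_probe(); note that idr_pre_get() returns 0 on allocation failure:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Sketch of the pre-3.9 two-step IDR allocation (idr_pre_get()/
 * idr_get_new() were later folded into idr_alloc()). */
static int st_alloc_index(struct idr *idr, spinlock_t *lock,
                          void *tape, int *dev_num)
{
        int error;

        if (!idr_pre_get(idr, GFP_KERNEL))
                return -ENOMEM;          /* no preloaded idr nodes */

        spin_lock(lock);
        error = idr_get_new(idr, tape, dev_num);
        spin_unlock(lock);

        return error;                    /* 0, or -EAGAIN on a race */
}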
@@ -4220,38 +4234,18 @@ out:
static int st_remove(struct device *dev)
{
- struct scsi_device *SDp = to_scsi_device(dev);
- struct scsi_tape *tpnt;
- int i, j, mode;
-
- scsi_autopm_get_device(SDp);
- write_lock(&st_dev_arr_lock);
- for (i = 0; i < st_dev_max; i++) {
- tpnt = scsi_tapes[i];
- if (tpnt != NULL && tpnt->device == SDp) {
- scsi_tapes[i] = NULL;
- st_nr_dev--;
- write_unlock(&st_dev_arr_lock);
- sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
- "tape");
- for (mode = 0; mode < ST_NBR_MODES; ++mode) {
- for (j=0; j < 2; j++) {
- device_destroy(st_sysfs_class,
- MKDEV(SCSI_TAPE_MAJOR,
- TAPE_MINOR(i, mode, j)));
- cdev_del(tpnt->modes[mode].cdevs[j]);
- tpnt->modes[mode].cdevs[j] = NULL;
- }
- }
+ struct scsi_tape *tpnt = dev_get_drvdata(dev);
+ int index = tpnt->index;
- mutex_lock(&st_ref_mutex);
- kref_put(&tpnt->kref, scsi_tape_release);
- mutex_unlock(&st_ref_mutex);
- return 0;
- }
- }
+ scsi_autopm_get_device(to_scsi_device(dev));
+ remove_cdevs(tpnt);
- write_unlock(&st_dev_arr_lock);
+ mutex_lock(&st_ref_mutex);
+ kref_put(&tpnt->kref, scsi_tape_release);
+ mutex_unlock(&st_ref_mutex);
+ spin_lock(&st_index_lock);
+ idr_remove(&st_index_idr, index);
+ spin_unlock(&st_index_lock);
return 0;
}
@@ -4283,6 +4277,11 @@ static void scsi_tape_release(struct kref *kref)
return;
}
+static struct class st_sysfs_class = {
+ .name = "scsi_tape",
+ .dev_attrs = st_dev_attrs,
+};
+
static int __init init_st(void)
{
int err;
@@ -4292,10 +4291,10 @@ static int __init init_st(void)
printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n",
verstr, st_fixed_buffer_size, st_max_sg_segs);
- st_sysfs_class = class_create(THIS_MODULE, "scsi_tape");
- if (IS_ERR(st_sysfs_class)) {
- printk(KERN_ERR "Unable create sysfs class for SCSI tapes\n");
- return PTR_ERR(st_sysfs_class);
+ err = class_register(&st_sysfs_class);
+ if (err) {
+ pr_err("Unable register sysfs class for SCSI tapes\n");
+ return err;
}
err = register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
@@ -4322,7 +4321,7 @@ err_chrdev:
unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
ST_MAX_TAPE_ENTRIES);
err_class:
- class_destroy(st_sysfs_class);
+ class_unregister(&st_sysfs_class);
return err;
}
@@ -4332,8 +4331,7 @@ static void __exit exit_st(void)
scsi_unregister_driver(&st_template.gendrv);
unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
ST_MAX_TAPE_ENTRIES);
- class_destroy(st_sysfs_class);
- kfree(scsi_tapes);
+ class_unregister(&st_sysfs_class);
printk(KERN_INFO "st: Unloaded.\n");
}
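Registering a statically defined class, rather than calling class_create(), is what allows the .dev_attrs hookup added above: every tape node created against st_sysfs_class gets its default attribute files atomically with device_create(), closing the window the old per-file device_create_file() calls left open. A minimal sketch of the lifecycle, with illustrative names; note that .dev_attrs is the interface of this kernel generation (later superseded by .dev_groups):

#include <linux/device.h>
#include <linux/module.h>

static struct device_attribute example_dev_attrs[] = {
	__ATTR_NULL,			/* terminator; real entries go first */
};

static struct class example_class = {
	.name		= "example",
	.dev_attrs	= example_dev_attrs,	/* created with every device */
};

static int __init example_init(void)
{
	return class_register(&example_class);	/* was: class_create() */
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);	/* was: class_destroy() */
}

module_init(example_init);
module_exit(example_exit);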
@@ -4405,10 +4403,9 @@ static void do_remove_sysfs_files(void)
driver_remove_file(sysfs, &driver_attr_try_direct_io);
}
-
/* The sysfs simple class interface */
static ssize_t
-st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
+defined_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
@@ -4417,10 +4414,9 @@ st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
return l;
}
-DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
-
static ssize_t
-st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
+default_blksize_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
@@ -4429,10 +4425,10 @@ st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
return l;
}
-DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
static ssize_t
-st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
+default_density_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
@@ -4443,11 +4439,9 @@ st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
return l;
}
-DEVICE_ATTR(default_density, S_IRUGO, st_defdensity_show, NULL);
-
static ssize_t
-st_defcompression_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+default_compression_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
@@ -4456,28 +4450,14 @@ st_defcompression_show(struct device *dev, struct device_attribute *attr,
return l;
}
-DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
-
static ssize_t
-st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
+options_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct st_modedef *STm = dev_get_drvdata(dev);
- struct scsi_tape *STp;
- int i, j, options;
+ struct scsi_tape *STp = STm->tape;
+ int options;
ssize_t l = 0;
- for (i=0; i < st_dev_max; i++) {
- for (j=0; j < ST_NBR_MODES; j++)
- if (&scsi_tapes[i]->modes[j] == STm)
- break;
- if (j < ST_NBR_MODES)
- break;
- }
- if (i == st_dev_max)
- return 0; /* should never happen */
-
- STp = scsi_tapes[i];
-
options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0;
options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0;
options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0;
@@ -4498,66 +4478,14 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
return l;
}
-DEVICE_ATTR(options, S_IRUGO, st_options_show, NULL);
-
-static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
-{
- int i, rew, error;
- char name[10];
- struct device *st_class_member;
-
- for (rew=0; rew < 2; rew++) {
- /* Make sure that the minor numbers corresponding to the four
- first modes always get the same names */
- i = mode << (4 - ST_NBR_MODE_BITS);
- snprintf(name, 10, "%s%s%s", rew ? "n" : "",
- STp->disk->disk_name, st_formats[i]);
- st_class_member =
- device_create(st_sysfs_class, &STp->device->sdev_gendev,
- MKDEV(SCSI_TAPE_MAJOR,
- TAPE_MINOR(dev_num, mode, rew)),
- &STp->modes[mode], "%s", name);
- if (IS_ERR(st_class_member)) {
- printk(KERN_WARNING "st%d: device_create failed\n",
- dev_num);
- error = PTR_ERR(st_class_member);
- goto out;
- }
-
- error = device_create_file(st_class_member,
- &dev_attr_defined);
- if (error) goto out;
- error = device_create_file(st_class_member,
- &dev_attr_default_blksize);
- if (error) goto out;
- error = device_create_file(st_class_member,
- &dev_attr_default_density);
- if (error) goto out;
- error = device_create_file(st_class_member,
- &dev_attr_default_compression);
- if (error) goto out;
- error = device_create_file(st_class_member,
- &dev_attr_options);
- if (error) goto out;
-
- if (mode == 0 && rew == 0) {
- error = sysfs_create_link(&STp->device->sdev_gendev.kobj,
- &st_class_member->kobj,
- "tape");
- if (error) {
- printk(KERN_ERR
- "st%d: Can't create sysfs link from SCSI device.\n",
- dev_num);
- goto out;
- }
- }
- }
-
- return 0;
-
-out:
- return error;
-}
+static struct device_attribute st_dev_attrs[] = {
+ __ATTR_RO(defined),
+ __ATTR_RO(default_blksize),
+ __ATTR_RO(default_density),
+ __ATTR_RO(default_compression),
+ __ATTR_RO(options),
+ __ATTR_NULL,
+};
/* The following functions may be useful for a larger audience. */
static int sgl_map_user_pages(struct st_buffer *STbp,
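The renames above (st_defined_show() to defined_show(), and so on) are what let __ATTR_RO() build this table: the macro derives both the sysfs file name and the ->show callback from its argument. Roughly, per <linux/sysfs.h> of this period:

/* Approximate expansion -- the real macro lives in <linux/sysfs.h>: */
#define __ATTR_RO(_name) {						\
	.attr	= { .name = __stringify(_name), .mode = S_IRUGO },	\
	.show	= _name##_show,						\
}

/* so __ATTR_RO(defined) wires a read-only "defined" file to defined_show() */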
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index b548923785ed..f3eee0f9f40c 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -66,6 +66,8 @@ struct st_modedef {
unsigned char default_compression; /* 0 = don't touch, etc */
short default_density; /* Forced density, -1 = no value */
int default_blksize; /* Forced blocksize, -1 = no value */
+ struct scsi_tape *tape;
+ struct device *devs[2]; /* Auto-rewind and non-rewind devices */
struct cdev *cdevs[2]; /* Auto-rewind and non-rewind devices */
};
@@ -76,7 +78,7 @@ struct st_modedef {
#define ST_MODE_SHIFT (7 - ST_NBR_MODE_BITS)
#define ST_MODE_MASK ((ST_NBR_MODES - 1) << ST_MODE_SHIFT)
-#define ST_MAX_TAPES 128
+#define ST_MAX_TAPES (1 << (20 - (ST_NBR_MODE_BITS + 1)))
#define ST_MAX_TAPE_ENTRIES (ST_MAX_TAPES << (ST_NBR_MODE_BITS + 1))
/* The status related to each partition */
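Worked out with the driver's ST_NBR_MODE_BITS of 2 (four modes) plus the one rewind/no-rewind bit, the new limit is ST_MAX_TAPES = 1 << (20 - 3) = 131072 tapes, and ST_MAX_TAPE_ENTRIES = 131072 << 3 = 1 << 20 = 1048576, i.e. the entire 20-bit character-device minor space, replacing the old hard cap of 128 tapes.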
@@ -99,6 +101,7 @@ struct scsi_tape {
struct mutex lock; /* For serialization */
struct completion wait; /* For SCSI commands */
struct st_buffer *buffer;
+ int index;
/* Drive characteristics */
unsigned char omit_blklims;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 64f90e17e51d..a22707186421 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -184,6 +184,7 @@ enum {
ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
+ ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -986,8 +987,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
irq_handler_t irq_handler, unsigned long irq_flags,
struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
-extern void ata_host_init(struct ata_host *, struct device *,
- unsigned long, struct ata_port_operations *);
+extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
@@ -1012,6 +1012,17 @@ extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
+extern int ata_sas_port_async_suspend(struct ata_port *ap, int *async);
+extern int ata_sas_port_async_resume(struct ata_port *ap, int *async);
+#else
+static inline int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+{
+ return 0;
+}
+static inline int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+{
+ return 0;
+}
#endif
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
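The #else branch supplies static inline no-ops, the usual header idiom that lets low-level drivers call the new PM helpers unconditionally; when CONFIG_PM is off, the calls compile away. A hypothetical caller, names illustrative:

/* Needs no #ifdef CONFIG_PM of its own: the header provides stubs. */
static int example_suspend_port(struct ata_port *ap)
{
	int async = 0;

	return ata_sas_port_async_suspend(ap, &async);
}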
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6b4565c440c8..416db39d484b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -753,6 +753,7 @@
#define PCI_DEVICE_ID_HP_CISSD 0x3238
#define PCI_DEVICE_ID_HP_CISSE 0x323a
#define PCI_DEVICE_ID_HP_CISSF 0x323b
+#define PCI_DEVICE_ID_HP_CISSH 0x323c
#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
#define PCI_VENDOR_ID_PCTECH 0x1042
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ae33706afeb0..ef937b56f9b5 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -79,7 +79,8 @@ enum phy_event {
PHYE_OOB_DONE = 1,
PHYE_OOB_ERROR = 2,
PHYE_SPINUP_HOLD = 3, /* hot plug SATA, no COMWAKE sent */
- PHY_NUM_EVENTS = 4,
+ PHYE_RESUME_TIMEOUT = 4,
+ PHY_NUM_EVENTS = 5,
};
enum discover_event {
@@ -87,8 +88,10 @@ enum discover_event {
DISCE_REVALIDATE_DOMAIN = 1,
DISCE_PORT_GONE = 2,
DISCE_PROBE = 3,
- DISCE_DESTRUCT = 4,
- DISC_NUM_EVENTS = 5,
+ DISCE_SUSPEND = 4,
+ DISCE_RESUME = 5,
+ DISCE_DESTRUCT = 6,
+ DISC_NUM_EVENTS = 7,
};
/* ---------- Expander Devices ---------- */
@@ -128,7 +131,7 @@ struct ex_phy {
u8 attached_sas_addr[SAS_ADDR_SIZE];
u8 attached_phy_id;
- u8 phy_change_count;
+ int phy_change_count;
enum routing_attribute routing_attr;
u8 virtual:1;
@@ -141,7 +144,7 @@ struct ex_phy {
struct expander_device {
struct list_head children;
- u16 ex_change_count;
+ int ex_change_count;
u16 max_route_indexes;
u8 num_phys;
@@ -169,6 +172,7 @@ struct sata_device {
enum ata_command_set command_set;
struct smp_resp rps_resp; /* report_phy_sata_resp */
u8 port_no; /* port number, if this is a PM (Port) */
+ int pm_result;
struct ata_port *ap;
struct ata_host ata_host;
@@ -182,6 +186,7 @@ struct ssp_device {
enum {
SAS_DEV_GONE,
+ SAS_DEV_FOUND, /* device notified to lldd */
SAS_DEV_DESTROY,
SAS_DEV_EH_PENDING,
SAS_DEV_LU_RESET,
@@ -273,6 +278,7 @@ struct asd_sas_port {
enum sas_linkrate linkrate;
struct sas_work work;
+ int suspended;
/* public: */
int id;
@@ -321,6 +327,7 @@ struct asd_sas_phy {
unsigned long phy_events_pending;
int error;
+ int suspended;
struct sas_phy *phy;
@@ -687,6 +694,9 @@ struct sas_domain_function_template {
extern int sas_register_ha(struct sas_ha_struct *);
extern int sas_unregister_ha(struct sas_ha_struct *);
+extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates);
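Together with the new DISCE_SUSPEND/DISCE_RESUME discovery events and the suspended flags on ports and phys above, these three entry points give a libsas LLDD a complete power-management sequence: quiesce libsas before cutting power, reset bookkeeping before re-enabling the hardware, then let libsas revalidate the domain. A hedged sketch of how a driver might wire them into its dev_pm_ops, modelled on the isci usage this merge enables (names illustrative):

static int example_lldd_suspend(struct device *dev)
{
	struct sas_ha_struct *ha = dev_get_drvdata(dev);

	sas_suspend_ha(ha);		/* flush events, suspend ports/phys */
	/* ... power the controller down ... */
	return 0;
}

static int example_lldd_resume(struct device *dev)
{
	struct sas_ha_struct *ha = dev_get_drvdata(dev);

	sas_prep_resume_ha(ha);		/* clear stale state before re-init */
	/* ... re-initialize the controller ... */
	sas_resume_ha(ha);		/* kick resume/revalidation events */
	return 0;
}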
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 2dfbdaa0b34a..ff71a5654684 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -45,6 +45,8 @@ void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
void sas_ata_schedule_reset(struct domain_device *dev);
void sas_ata_wait_eh(struct domain_device *dev);
void sas_probe_sata(struct asd_sas_port *port);
+void sas_suspend_sata(struct asd_sas_port *port);
+void sas_resume_sata(struct asd_sas_port *port);
void sas_ata_end_eh(struct ata_port *ap);
#else
@@ -82,6 +84,14 @@ static inline void sas_probe_sata(struct asd_sas_port *port)
{
}
+static inline void sas_suspend_sata(struct asd_sas_port *port)
+{
+}
+
+static inline void sas_resume_sata(struct asd_sas_port *port)
+{
+}
+
static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
return 0;
diff --git a/include/scsi/scsi_bsg_fc.h b/include/scsi/scsi_bsg_fc.h
index 91a4e4ff9a9b..3031b900b087 100644
--- a/include/scsi/scsi_bsg_fc.h
+++ b/include/scsi/scsi_bsg_fc.h
@@ -26,8 +26,6 @@
* This file intended to be included by both kernel and user space
*/
-#include <scsi/scsi.h>
-
/*
* FC Transport SGIO v4 BSG Message Support
*/
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9895f69294fc..88fae8d20154 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -156,6 +156,7 @@ struct scsi_device {
unsigned is_visible:1; /* is the device visible in sysfs */
unsigned can_power_off:1; /* Device supports runtime power off */
unsigned wce_default_on:1; /* Cache is ON by default */
+ unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
struct list_head event_list; /* asserted events */
@@ -476,6 +477,9 @@ static inline int scsi_device_enclosure(struct scsi_device *sdev)
static inline int scsi_device_protection(struct scsi_device *sdev)
{
+ if (sdev->no_dif)
+ return 0;
+
return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0);
}
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index b4ddd3b18b4c..cc1f3e786ad7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -30,4 +30,5 @@
#define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */
#define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */
#define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
+#define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
#endif
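BLIST_NO_DIF pairs with the new sdev->no_dif bit and the early return added to scsi_device_protection() above: a blacklisted device is treated as unprotected even when its INQUIRY PROTECT bit is set. The propagation during scan looks like this (illustrative; the corresponding hunk lives in drivers/scsi/scsi_scan.c, outside this excerpt):

	/* in scsi_add_lun(), alongside the other BLIST_* translations */
	if (*bflags & BLIST_NO_DIF)
		sdev->no_dif = 1;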
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 5f7d5b3b1c6e..49084807eb6b 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -873,6 +873,9 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign
SHOST_DIF_TYPE2_PROTECTION,
SHOST_DIF_TYPE3_PROTECTION };
+ if (target_type >= ARRAY_SIZE(cap))
+ return 0;
+
return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}
@@ -884,6 +887,9 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign
SHOST_DIX_TYPE2_PROTECTION,
SHOST_DIX_TYPE3_PROTECTION };
+ if (target_type >= ARRAY_SIZE(cap))
+ return 0;
+
return shost->prot_capabilities & cap[target_type];
#endif
return 0;
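Both guards close the same latent out-of-bounds read: each cap[] table has exactly four entries (protection types 0 through 3), so the bound must come from the array itself. Note that SHOST_DIF_TYPE3_PROTECTION is a capability bit (1 << 2), not an index, which is why the checks compare against ARRAY_SIZE(cap) rather than an enum value. Reduced to its essentials (illustrative):

#include <linux/kernel.h>		/* ARRAY_SIZE() */

static unsigned int dif_capable(unsigned int prot_capabilities,
				unsigned int target_type)
{
	static const unsigned int cap[] = { 0,	/* types 0..3 only */
					    SHOST_DIF_TYPE1_PROTECTION,
					    SHOST_DIF_TYPE2_PROTECTION,
					    SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))	/* reject before indexing */
		return 0;

	return prot_capabilities & cap[target_type] ? target_type : 0;
}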