author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 12:55:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 12:55:29 -0700
commit     424a6f6ef990b7e9f56f6627bfc6c46b493faeb4 (patch)
tree       0028356ed8003495fbbe1f716f359e3c8ebc35b6 /drivers/scsi/qla2xxx
parent     1ab142d499294b844ecc81e8004db4ce029b0b61 (diff)
parent     cd8df932d894f3128c884e3ae1b2b484540513db (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
SCSI updates from James Bottomley:
 "The update includes the usual assortment of driver updates (lpfc,
  qla2xxx, qla4xxx, bfa, bnx2fc, bnx2i, isci, fcoe, hpsa) plus a huge
  amount of infrastructure work in the SAS library and transport class
  as well as an iSCSI update. There's also a new SCSI based virtio
  driver."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (177 commits)
  [SCSI] qla4xxx: Update driver version to 5.02.00-k15
  [SCSI] qla4xxx: trivial cleanup
  [SCSI] qla4xxx: Fix sparse warning
  [SCSI] qla4xxx: Add support for multiple session per host.
  [SCSI] qla4xxx: Export CHAP index as sysfs attribute
  [SCSI] scsi_transport: Export CHAP index as sysfs attribute
  [SCSI] qla4xxx: Add support to display CHAP list and delete CHAP entry
  [SCSI] iscsi_transport: Add support to display CHAP list and delete CHAP entry
  [SCSI] pm8001: fix endian issue with code optimization.
  [SCSI] pm8001: Fix possible racing condition.
  [SCSI] pm8001: Fix bogus interrupt state flag issue.
  [SCSI] ipr: update PCI ID definitions for new adapters
  [SCSI] qla2xxx: handle default case in qla2x00_request_firmware()
  [SCSI] isci: improvements in driver unloading routine
  [SCSI] isci: improve phy event warnings
  [SCSI] isci: debug, provide state-enum-to-string conversions
  [SCSI] scsi_transport_sas: 'enable' phys on reset
  [SCSI] libsas: don't recover end devices attached to disabled phys
  [SCSI] libsas: fixup target_port_protocols for expanders that don't report sata
  [SCSI] libsas: set attached device type and target protocols for local phys
  ...
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c   |  177
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c    |  120
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c    |  630
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h    |   63
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h    |  117
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c    |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h     |   13
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h    |   22
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c     |   86
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c   |  540
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h |   51
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c   |  167
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c    |  445
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c    |  410
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c    |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c     |   90
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h     |    3
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c     |  435
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c    |  148
19 files changed, 2198 insertions(+), 1323 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9f41b3b4358f..5926f5a87ea8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -356,7 +356,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
+ || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
@@ -627,144 +628,6 @@ static struct bin_attribute sysfs_reset_attr = {
};
static ssize_t
-qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
- uint16_t dev, adr, opt, len;
- int rval;
-
- ha->edc_data_len = 0;
-
- if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
- return -EINVAL;
-
- if (!ha->edc_data) {
- ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->edc_data_dma);
- if (!ha->edc_data) {
- ql_log(ql_log_warn, vha, 0x7073,
- "Unable to allocate memory for EDC write.\n");
- return -ENOMEM;
- }
- }
-
- dev = le16_to_cpup((void *)&buf[0]);
- adr = le16_to_cpup((void *)&buf[2]);
- opt = le16_to_cpup((void *)&buf[4]);
- len = le16_to_cpup((void *)&buf[6]);
-
- if (!(opt & BIT_0))
- if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
- return -EINVAL;
-
- memcpy(ha->edc_data, &buf[8], len);
-
- rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
- dev, adr, len, opt);
- if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7074,
- "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
- rval, dev, adr, opt, len, buf[8]);
- return -EIO;
- }
-
- return count;
-}
-
-static struct bin_attribute sysfs_edc_attr = {
- .attr = {
- .name = "edc",
- .mode = S_IWUSR,
- },
- .size = 0,
- .write = qla2x00_sysfs_write_edc,
-};
-
-static ssize_t
-qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
- uint16_t dev, adr, opt, len;
- int rval;
-
- ha->edc_data_len = 0;
-
- if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
- return -EINVAL;
-
- if (!ha->edc_data) {
- ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->edc_data_dma);
- if (!ha->edc_data) {
- ql_log(ql_log_warn, vha, 0x708c,
- "Unable to allocate memory for EDC status.\n");
- return -ENOMEM;
- }
- }
-
- dev = le16_to_cpup((void *)&buf[0]);
- adr = le16_to_cpup((void *)&buf[2]);
- opt = le16_to_cpup((void *)&buf[4]);
- len = le16_to_cpup((void *)&buf[6]);
-
- if (!(opt & BIT_0))
- if (len == 0 || len > DMA_POOL_SIZE)
- return -EINVAL;
-
- memset(ha->edc_data, 0, len);
- rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
- dev, adr, len, opt);
- if (rval != QLA_SUCCESS) {
- ql_log(ql_log_info, vha, 0x7075,
- "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
- rval, dev, adr, opt, len);
- return -EIO;
- }
-
- ha->edc_data_len = len;
-
- return count;
-}
-
-static ssize_t
-qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
-
- if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
- return 0;
-
- if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
- return -EINVAL;
-
- memcpy(buf, ha->edc_data, ha->edc_data_len);
-
- return ha->edc_data_len;
-}
-
-static struct bin_attribute sysfs_edc_status_attr = {
- .attr = {
- .name = "edc_status",
- .mode = S_IRUSR | S_IWUSR,
- },
- .size = 0,
- .write = qla2x00_sysfs_write_edc_status,
- .read = qla2x00_sysfs_read_edc_status,
-};
-
-static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -879,8 +742,6 @@ static struct sysfs_entry {
{ "vpd", &sysfs_vpd_attr, 1 },
{ "sfp", &sysfs_sfp_attr, 1 },
{ "reset", &sysfs_reset_attr, },
- { "edc", &sysfs_edc_attr, 2 },
- { "edc_status", &sysfs_edc_status_attr, 2 },
{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
{ NULL },
@@ -898,7 +759,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
continue;
- if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
+ if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -926,7 +787,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
continue;
- if (iter->is4GBp_only == 3 && !!(IS_QLA8XXX_TYPE(vha->hw)))
+ if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -1231,7 +1092,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1278,7 +1139,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1293,7 +1154,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1316,7 +1177,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1328,7 +1189,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1364,7 +1225,7 @@ qla2x00_thermal_temp_show(struct device *dev,
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
if (rval != QLA_SUCCESS)
- temp = frac = 0;
+ return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
}
@@ -1493,6 +1354,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_10GB:
speed = FC_PORTSPEED_10GBIT;
break;
+ case PORT_SPEED_16GB:
+ speed = FC_PORTSPEED_16GBIT;
+ break;
}
fc_host_speed(shost) = speed;
}
@@ -1643,10 +1507,14 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* final cleanup of firmware resources (PCBs and XCBs).
*/
if (fcport->loop_id != FC_NO_LOOP_ID &&
- !test_bit(UNLOADING, &fcport->vha->dpc_flags))
- fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
- fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
+ if (IS_FWI2_CAPABLE(fcport->vha->hw))
+ fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ else
+ qla2x00_port_logout(fcport->vha, fcport);
+ }
}
static int
@@ -1889,6 +1757,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
break;
}
}
+
if (qos) {
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
qos);
@@ -2086,7 +1955,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
speed = FC_PORTSPEED_10GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2c4714279bcc..f74cc0602f3b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,29 +11,36 @@
#include <linux/delay.h>
/* BSG support for ELS/CT pass through */
-inline srb_t *
-qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+void
+qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
- srb_t *sp;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+ bsg_job->reply->result = res;
+ bsg_job->job_done(bsg_job);
+ sp->free(vha, sp);
+}
+
+void
+qla2x00_bsg_sp_free(void *data, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx;
- sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
- if (!sp)
- goto done;
- ctx = kzalloc(size, GFP_KERNEL);
- if (!ctx) {
- mempool_free(sp, ha->srb_mempool);
- sp = NULL;
- goto done;
- }
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->ctx = ctx;
- ctx->iocbs = 1;
-done:
- return sp;
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+ if (sp->type == SRB_CT_CMD ||
+ sp->type == SRB_ELS_CMD_HST)
+ kfree(sp->fcport);
+ mempool_free(sp, vha->hw->srb_mempool);
}
int
@@ -101,8 +108,6 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
uint32_t len;
uint32_t oper;
- bsg_job->reply->reply_payload_rcv_len = 0;
-
if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
@@ -217,6 +222,7 @@ exit_fcp_prio_cfg:
bsg_job->job_done(bsg_job);
return ret;
}
+
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
@@ -230,7 +236,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
- struct srb_ctx *els;
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
@@ -337,20 +342,21 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
rval = -ENOMEM;
goto done_unmap_sg;
}
- els = sp->ctx;
- els->type =
+ sp->type =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
- els->name =
+ sp->name =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
"bsg_els_rpt" : "bsg_els_hst");
- els->u.bsg_job = bsg_job;
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
@@ -362,7 +368,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_unmap_sg;
@@ -409,7 +414,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
uint16_t loop_id;
struct fc_port *fcport;
char *type = "FC_BSG_HST_CT";
- struct srb_ctx *ct;
req_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
@@ -486,19 +490,20 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
fcport->loop_id = loop_id;
/* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_warn, vha, 0x7015,
- "qla2x00_get_ctx_bsg_sp failed.\n");
+ "qla2x00_get_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
- ct = sp->ctx;
- ct->type = SRB_CT_CMD;
- ct->name = "bsg_ct";
- ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
- ct->u.bsg_job = bsg_job;
+ sp->type = SRB_CT_CMD;
+ sp->name = "bsg_ct";
+ sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
@@ -511,7 +516,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_free_fcport;
@@ -540,7 +544,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
int rval = 0;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto done_set_internal;
new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -582,7 +586,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@@ -707,7 +711,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if ((ha->current_topology == ISP_CFG_F ||
(atomic_read(&vha->loop_state) == LOOP_DOWN) ||
- (IS_QLA81XX(ha) &&
+ ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@@ -717,13 +721,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
- bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
goto done_free_dma_req;
@@ -737,8 +740,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
new_config)) {
ql_log(ql_log_warn, vha, 0x7024,
"Internal loopback failed.\n");
- bsg_job->reply->reply_payload_rcv_len =
- 0;
bsg_job->reply->result =
(DID_ERROR << 16);
rval = -EPERM;
@@ -750,8 +751,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
*/
if (qla81xx_reset_internal_loopback(vha,
config, 1)) {
- bsg_job->reply->reply_payload_rcv_len =
- 0;
bsg_job->reply->result =
(DID_ERROR << 16);
rval = -EPERM;
@@ -788,7 +787,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"MPI reset failed.\n");
}
- bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EIO;
goto done_free_dma_req;
@@ -813,7 +811,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
fw_sts_ptr += sizeof(response);
*fw_sts_ptr = command_sent;
rval = 0;
- bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
@@ -872,7 +869,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x7030,
"Vendor request 84xx reset failed.\n");
- rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
@@ -971,9 +968,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7037,
"Vendor request 84xx updatefw failed.\n");
- rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
-
} else {
ql_dbg(ql_dbg_user, vha, 0x7038,
"Vendor request 84xx updatefw completed.\n");
@@ -1159,7 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7043,
"Vendor request 84xx mgmt failed.\n");
- rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
@@ -1210,8 +1206,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint8_t *rsp_ptr = NULL;
- bsg_job->reply->reply_payload_rcv_len = 0;
-
if (!IS_IIDMA_CAPABLE(vha->hw)) {
ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
return -EINVAL;
@@ -1304,8 +1298,6 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
int valid = 0;
struct qla_hw_data *ha = vha->hw;
- bsg_job->reply->reply_payload_rcv_len = 0;
-
if (unlikely(pci_channel_offline(ha->pdev)))
return -EINVAL;
@@ -1331,7 +1323,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_QLA8XXX_TYPE(ha))
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7058,
@@ -1617,6 +1609,9 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
struct Scsi_Host *host;
scsi_qla_host_t *vha;
+ /* In case no data transferred. */
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
fcport = *(fc_port_t **) rport->dd_data;
@@ -1655,6 +1650,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_RPT_CT:
default:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
+ bsg_job->reply->result = ret;
break;
}
return ret;
@@ -1669,7 +1665,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
int cnt, que;
unsigned long flags;
struct req_que *req;
- struct srb_ctx *sp_bsg;
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1681,11 +1676,9 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- sp_bsg = sp->ctx;
-
- if (((sp_bsg->type == SRB_CT_CMD) ||
- (sp_bsg->type == SRB_ELS_CMD_HST))
- && (sp_bsg->u.bsg_job == bsg_job)) {
+ if (((sp->type == SRB_CT_CMD) ||
+ (sp->type == SRB_ELS_CMD_HST))
+ && (sp->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
@@ -1715,7 +1708,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
- kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45cbf0ba624d..897731b93df2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,23 +11,27 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0116 | 0xfa |
- * | Mailbox commands | 0x112b | |
- * | Device Discovery | 0x2084 | |
- * | Queue Command and IO tracing | 0x302f | 0x3008,0x302d, |
- * | | | 0x302e |
+ * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x113e | 0x112c-0x112e |
+ * | | | 0x113a |
+ * | Device Discovery | 0x2086 | 0x2020-0x2022 |
+ * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 |
+ * | | | 0x302d-0x302e |
* | DPC Thread | 0x401c | |
- * | Async Events | 0x5057 | 0x5052 |
- * | Timer Routines | 0x6011 | 0x600e,0x600f |
- * | User Space Interactions | 0x709e | 0x7018,0x702e |
- * | | | 0x7039,0x7045 |
+ * | Async Events | 0x505d | 0x502b-0x502f |
+ * | | | 0x5047,0x5052 |
+ * | Timer Routines | 0x6011 | 0x600e-0x600f |
+ * | User Space Interactions | 0x709f | 0x7018,0x702e, |
+ * | | | 0x7039,0x7045, |
+ * | | | 0x7073-0x7075, |
+ * | | | 0x708c |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb052 | |
- * | MultiQ | 0xc00b | |
- * | Misc | 0xd00b | |
+ * | ISP82XX Specific | 0xb054 | 0xb053 |
+ * | MultiQ | 0xc00c | |
+ * | Misc | 0xd010 | |
* ----------------------------------------------------------------------
*/
@@ -85,7 +89,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- dwords = GID_LIST_SIZE / 4;
+ dwords = qla2x00_gid_list_size(ha) / 4;
for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
cnt += dwords, addr += dwords) {
if (cnt + dwords > ram_dwords)
@@ -260,7 +264,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- words = GID_LIST_SIZE / 2;
+ words = qla2x00_gid_list_size(ha) / 2;
for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
cnt += words, addr += words) {
if (cnt + words > ram_words)
@@ -375,6 +379,77 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ struct qla2xxx_mqueue_chain *q;
+ struct qla2xxx_mqueue_header *qh;
+ struct req_que *req;
+ struct rsp_que *rsp;
+ int que;
+
+ if (!ha->mqenable)
+ return ptr;
+
+ /* Request queues */
+ for (que = 1; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ break;
+
+ /* Add chain. */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (req->length * sizeof(request_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(req->length * sizeof(request_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, req->ring, req->length * sizeof(request_t));
+ ptr += req->length * sizeof(request_t);
+ }
+
+ /* Response queues */
+ for (que = 1; que < ha->max_rsp_queues; que++) {
+ rsp = ha->rsp_q_map[que];
+ if (!rsp)
+ break;
+
+ /* Add chain. */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (rsp->length * sizeof(response_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(rsp->length * sizeof(response_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
+ ptr += rsp->length * sizeof(response_t);
+ }
+
+ return ptr;
+}
+
+static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
uint32_t cnt, que_idx;
@@ -382,7 +457,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
struct qla2xxx_mq_chain *mq = ptr;
struct device_reg_25xxmq __iomem *reg;
- if (!ha->mqenable)
+ if (!ha->mqenable || IS_QLA83XX(ha))
return ptr;
mq = ptr;
@@ -1322,12 +1397,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt = qla24xx_copy_eft(ha, nxt);
/* Chain entries -- started with MQ. */
- qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1636,12 +1715,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt = qla24xx_copy_eft(ha, nxt);
/* Chain entries -- started with MQ. */
- qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1650,6 +1733,507 @@ qla81xx_fw_dump_failed:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+void
+qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt, reg_data;
+ uint32_t risc_address;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t __iomem *dmp_reg;
+ uint32_t *iter_reg;
+ uint16_t __iomem *mbx_reg;
+ unsigned long flags;
+ struct qla83xx_fw_dump *fw;
+ uint32_t ext_mem_cnt;
+ void *nxt, *nxt_chain;
+ uint32_t *last_chain = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ risc_address = ext_mem_cnt = 0;
+ flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd00c,
+ "No buffer available for dump!!!\n");
+ goto qla83xx_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd00d,
+ "Firmware has been previously dumped (%p) -- ignoring "
+ "request...\n", ha->fw_dump);
+ goto qla83xx_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp83;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+ /* Pause RISC. */
+ rval = qla24xx_pause_risc(reg);
+ if (rval != QLA_SUCCESS)
+ goto qla83xx_fw_dump_failed_0;
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ dmp_reg = &reg->iobase_window;
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ dmp_reg = &reg->unused_4_1[0];
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ dmp_reg = &reg->unused_4_1[2];
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ /* select PCR and disable ecc checking and correction */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+
+ /* Host/Risc registers. */
+ iter_reg = fw->host_risc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7040, 16, iter_reg);
+
+ /* PCIe registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ dmp_reg = &reg->iobase_c4;
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ WRT_REG_DWORD(&reg->iobase_window, 0x00);
+ RD_REG_DWORD(&reg->iobase_window);
+
+ /* Host interface registers. */
+ dmp_reg = &reg->flash_addr;
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Disable interrupts. */
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+
+ /* Shadow registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ /* RISC I/O register. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+ /* Transfer sequence registers. */
+ iter_reg = fw->xseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+ iter_reg = fw->xseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+ qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
+
+ /* Receive sequence registers. */
+ iter_reg = fw->rseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+ iter_reg = fw->rseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+ qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+ qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
+
+ /* Auxiliary sequence registers. */
+ iter_reg = fw->aseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB170, 16, iter_reg);
+
+ iter_reg = fw->aseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+ qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+ qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
+
+ /* Command DMA registers. */
+ iter_reg = fw->cmd_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
+ qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
+
+ /* Queues. */
+ iter_reg = fw->req0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->resp0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->req1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Transmit DMA registers. */
+ iter_reg = fw->xmt0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+ iter_reg = fw->xmt1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+ iter_reg = fw->xmt2_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+ iter_reg = fw->xmt3_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+ iter_reg = fw->xmt4_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+ /* Receive DMA registers. */
+ iter_reg = fw->rcvt0_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+ iter_reg = fw->rcvt1_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+ /* RISC registers. */
+ iter_reg = fw->risc_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+ qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+ /* Local memory controller registers. */
+ iter_reg = fw->lmc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+ qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+ /* Fibre Protocol Module registers. */
+ iter_reg = fw->fpm_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
+
+ /* RQ0 Array registers. */
+ iter_reg = fw->rq0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
+
+ /* RQ1 Array registers. */
+ iter_reg = fw->rq1_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
+
+ /* RP0 Array registers. */
+ iter_reg = fw->rp0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
+
+ /* RP1 Array registers. */
+ iter_reg = fw->rp1_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
+
+ iter_reg = fw->at0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
+
+ /* I/O Queue Control registers. */
+ qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
+
+ /* Frame Buffer registers. */
+ iter_reg = fw->fb_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+ /* Multi queue registers */
+ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+ &last_chain);
+
+ rval = qla24xx_soft_reset(ha);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xd00e,
+ "SOFT RESET FAILED, forcing continuation of dump!!!\n");
+ rval = QLA_SUCCESS;
+
+ ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+ udelay(5);
+
+ if (!cnt) {
+ nxt = fw->code_ram;
+ nxt += sizeof(fw->code_ram),
+ nxt += (ha->fw_memory_size - 0x100000 + 1);
+ goto copy_queue;
+ } else
+ ql_log(ql_log_warn, vha, 0xd010,
+ "bigger hammer success?\n");
+ }
+
+ rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+ &nxt);
+ if (rval != QLA_SUCCESS)
+ goto qla83xx_fw_dump_failed_0;
+
+copy_queue:
+ nxt = qla2xxx_copy_queues(ha, nxt);
+
+ nxt = qla24xx_copy_eft(ha, nxt);
+
+ /* Chain entries -- started with MQ. */
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla83xx_fw_dump_failed_0:
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla83xx_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
@@ -1782,13 +2366,13 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
vaf.va = &va;
switch (level) {
- case 0: /* FATAL LOG */
+ case ql_log_fatal: /* FATAL LOG */
pr_crit("%s%pV", pbuf, &vaf);
break;
- case 1:
+ case ql_log_warn:
pr_err("%s%pV", pbuf, &vaf);
break;
- case 2:
+ case ql_log_info:
pr_warn("%s%pV", pbuf, &vaf);
break;
default:
@@ -1837,13 +2421,13 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
vaf.va = &va;
switch (level) {
- case 0: /* FATAL LOG */
+ case ql_log_fatal: /* FATAL LOG */
pr_crit("%s%pV", pbuf, &vaf);
break;
- case 1:
+ case ql_log_warn:
pr_err("%s%pV", pbuf, &vaf);
break;
- case 2:
+ case ql_log_info:
pr_warn("%s%pV", pbuf, &vaf);
break;
default:
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 5f1b6d9c3dcb..2157bdf1569a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -165,6 +165,54 @@ struct qla81xx_fw_dump {
uint32_t ext_mem[1];
};
+struct qla83xx_fw_dump {
+ uint32_t host_status;
+ uint32_t host_risc_reg[48];
+ uint32_t pcie_regs[4];
+ uint32_t host_reg[32];
+ uint32_t shadow_reg[11];
+ uint32_t risc_io_reg;
+ uint16_t mailbox_reg[32];
+ uint32_t xseq_gp_reg[256];
+ uint32_t xseq_0_reg[48];
+ uint32_t xseq_1_reg[16];
+ uint32_t xseq_2_reg[16];
+ uint32_t rseq_gp_reg[256];
+ uint32_t rseq_0_reg[32];
+ uint32_t rseq_1_reg[16];
+ uint32_t rseq_2_reg[16];
+ uint32_t rseq_3_reg[16];
+ uint32_t aseq_gp_reg[256];
+ uint32_t aseq_0_reg[32];
+ uint32_t aseq_1_reg[16];
+ uint32_t aseq_2_reg[16];
+ uint32_t aseq_3_reg[16];
+ uint32_t cmd_dma_reg[64];
+ uint32_t req0_dma_reg[15];
+ uint32_t resp0_dma_reg[15];
+ uint32_t req1_dma_reg[15];
+ uint32_t xmt0_dma_reg[32];
+ uint32_t xmt1_dma_reg[32];
+ uint32_t xmt2_dma_reg[32];
+ uint32_t xmt3_dma_reg[32];
+ uint32_t xmt4_dma_reg[32];
+ uint32_t xmt_data_dma_reg[16];
+ uint32_t rcvt0_data_dma_reg[32];
+ uint32_t rcvt1_data_dma_reg[32];
+ uint32_t risc_gp_reg[128];
+ uint32_t lmc_reg[128];
+ uint32_t fpm_hdw_reg[256];
+ uint32_t rq0_array_reg[256];
+ uint32_t rq1_array_reg[256];
+ uint32_t rp0_array_reg[256];
+ uint32_t rp1_array_reg[256];
+ uint32_t queue_control_reg[16];
+ uint32_t fb_hdw_reg[432];
+ uint32_t at0_array_reg[128];
+ uint32_t code_ram[0x2400];
+ uint32_t ext_mem[1];
+};
+
#define EFT_NUM_BUFFERS 4
#define EFT_BYTES_PER_BUFFER 0x4000
#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -192,9 +240,23 @@ struct qla2xxx_mq_chain {
uint32_t qregs[4 * QLA_MQ_SIZE];
};
+struct qla2xxx_mqueue_header {
+ uint32_t queue;
+#define TYPE_REQUEST_QUEUE 0x1
+#define TYPE_RESPONSE_QUEUE 0x2
+ uint32_t number;
+ uint32_t size;
+};
+
+struct qla2xxx_mqueue_chain {
+ uint32_t type;
+ uint32_t chain_size;
+};
+
#define DUMP_CHAIN_VARIANT 0x80000000
#define DUMP_CHAIN_FCE 0x7FFFFAF0
#define DUMP_CHAIN_MQ 0x7FFFFAF1
+#define DUMP_CHAIN_QUEUE 0x7FFFFAF2
#define DUMP_CHAIN_LAST 0x80000000
struct qla2xxx_fw_dump {
@@ -228,6 +290,7 @@ struct qla2xxx_fw_dump {
struct qla24xx_fw_dump isp24;
struct qla25xx_fw_dump isp25;
struct qla81xx_fw_dump isp81;
+ struct qla83xx_fw_dump isp83;
} isp;
};
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index af1003f9de1e..a2443031dbe7 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -125,17 +125,17 @@
* Fibre Channel device definitions.
*/
#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */
-#define MAX_FIBRE_DEVICES 512
+#define MAX_FIBRE_DEVICES_2100 512
+#define MAX_FIBRE_DEVICES_2400 2048
+#define MAX_FIBRE_DEVICES_LOOP 128
+#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400
#define MAX_FIBRE_LUNS 0xFFFF
-#define MAX_RSCN_COUNT 32
#define MAX_HOST_COUNT 16
/*
* Host adapter default definitions.
*/
#define MAX_BUSES 1 /* We only have one bus today */
-#define MAX_TARGETS_2100 MAX_FIBRE_DEVICES
-#define MAX_TARGETS_2200 MAX_FIBRE_DEVICES
#define MIN_LUNS 8
#define MAX_LUNS MAX_FIBRE_LUNS
#define MAX_CMDS_PER_LUN 255
@@ -202,20 +202,12 @@ struct sd_dif_tuple {
/*
* SCSI Request Block
*/
-typedef struct srb {
- atomic_t ref_count;
- struct fc_port *fcport;
- uint32_t handle;
-
+struct srb_cmd {
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
-
- uint16_t flags;
-
uint32_t request_sense_length;
uint8_t *request_sense_ptr;
-
void *ctx;
-} srb_t;
+};
/*
* SRB flag definitions
@@ -254,10 +246,7 @@ struct srb_iocb {
} u;
struct timer_list timer;
-
- void (*done)(srb_t *);
- void (*free)(srb_t *);
- void (*timeout)(srb_t *);
+ void (*timeout)(void *);
};
/* Values for srb_ctx type */
@@ -268,16 +257,37 @@ struct srb_iocb {
#define SRB_CT_CMD 5
#define SRB_ADISC_CMD 6
#define SRB_TM_CMD 7
+#define SRB_SCSI_CMD 8
-struct srb_ctx {
+typedef struct srb {
+ atomic_t ref_count;
+ struct fc_port *fcport;
+ uint32_t handle;
+ uint16_t flags;
uint16_t type;
char *name;
int iocbs;
union {
- struct srb_iocb *iocb_cmd;
+ struct srb_iocb iocb_cmd;
struct fc_bsg_job *bsg_job;
+ struct srb_cmd scmd;
} u;
-};
+ void (*done)(void *, void *, int);
+ void (*free)(void *, void *);
+} srb_t;
+
+#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
+#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
+#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
+
+#define GET_CMD_SENSE_LEN(sp) \
+ (sp->u.scmd.request_sense_length)
+#define SET_CMD_SENSE_LEN(sp, len) \
+ (sp->u.scmd.request_sense_length = len)
+#define GET_CMD_SENSE_PTR(sp) \
+ (sp->u.scmd.request_sense_ptr)
+#define SET_CMD_SENSE_PTR(sp, ptr) \
+ (sp->u.scmd.request_sense_ptr = ptr)
struct msg_echo_lb {
dma_addr_t send_dma;
@@ -653,8 +663,10 @@ typedef struct {
#define MBC_DIAGNOSTIC_LOOP_BACK 0x45 /* Diagnostic loop back. */
#define MBC_ONLINE_SELF_TEST 0x46 /* Online self-test. */
#define MBC_ENHANCED_GET_PORT_DATABASE 0x47 /* Get port database + login */
+#define MBC_CONFIGURE_VF 0x4b /* Configure VFs */
#define MBC_RESET_LINK_STATUS 0x52 /* Reset Link Error Status */
#define MBC_IOCB_COMMAND_A64 0x54 /* Execute IOCB command (64) */
+#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
#define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
@@ -1709,6 +1721,7 @@ typedef struct fc_port {
uint16_t vp_idx;
uint8_t fc4_type;
+ uint8_t scan_state;
} fc_port_t;
/*
@@ -1761,7 +1774,6 @@ static const char * const port_state_str[] = {
#define GID_PT_CMD 0x1A1
#define GID_PT_REQ_SIZE (16 + 4)
-#define GID_PT_RSP_SIZE (16 + (MAX_FIBRE_DEVICES * 4))
#define GPN_ID_CMD 0x112
#define GPN_ID_REQ_SIZE (16 + 4)
@@ -2051,7 +2063,9 @@ struct ct_sns_rsp {
} ga_nxt;
struct {
- struct ct_sns_gid_pt_data entries[MAX_FIBRE_DEVICES];
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gid_pt_data
+ entries[MAX_FIBRE_DEVICES_MAX];
} gid_pt;
struct {
@@ -2112,7 +2126,11 @@ struct ct_sns_pkt {
#define GID_PT_SNS_SCMD_LEN 6
#define GID_PT_SNS_CMD_SIZE 28
-#define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES * 4 + 16)
+/*
+ * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older
+ * adapters.
+ */
+#define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES_2100 * 4 + 16)
#define GPN_ID_SNS_SCMD_LEN 6
#define GPN_ID_SNS_CMD_SIZE 28
@@ -2160,7 +2178,6 @@ struct gid_list_info {
uint16_t loop_id; /* ISP23XX -- 6 bytes. */
uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
};
-#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
/* NPIV */
typedef struct vport_info {
@@ -2261,6 +2278,7 @@ struct isp_operations {
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
#define QLA_PCI_MSIX_CONTROL 0xa2
+#define QLA_83XX_PCI_MSIX_CONTROL 0x92
struct scsi_qla_host;
@@ -2341,7 +2359,7 @@ struct qla_statistics {
#define QLA_MQ_SIZE 32
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
- ((ha->mqenable) ? \
+ ((ha->mqenable || IS_QLA83XX(ha)) ? \
((void *)(ha->mqiobase) +\
(QLA_QUE_PAGE * id)) :\
((void *)(ha->iobase)))
@@ -2461,6 +2479,7 @@ struct qla_hw_data {
#define MIN_IOBASE_LEN 0x100
/* Multi queue data structs */
device_reg_t __iomem *mqiobase;
+ device_reg_t __iomem *msixbase;
uint16_t msix_count;
uint8_t mqenable;
struct req_que **req_q_map;
@@ -2485,6 +2504,7 @@ struct qla_hw_data {
atomic_t loop_down_timer; /* loop down timer */
uint8_t link_down_timeout; /* link down timeout */
uint16_t max_loop_id;
+ uint16_t max_fibre_devices; /* Maximum number of targets */
uint16_t fb_rev;
uint16_t min_external_loopid; /* First external loop Id */
@@ -2494,6 +2514,7 @@ struct qla_hw_data {
#define PORT_SPEED_2GB 0x01
#define PORT_SPEED_4GB 0x03
#define PORT_SPEED_8GB 0x04
+#define PORT_SPEED_16GB 0x05
#define PORT_SPEED_10GB 0x13
uint16_t link_data_rate; /* F/W operating speed */
@@ -2515,6 +2536,8 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
+#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
+#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
uint32_t device_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
@@ -2531,7 +2554,9 @@ struct qla_hw_data {
#define DT_ISP8432 BIT_12
#define DT_ISP8001 BIT_13
#define DT_ISP8021 BIT_14
-#define DT_ISP_LAST (DT_ISP8021 << 1)
+#define DT_ISP2031 BIT_15
+#define DT_ISP8031 BIT_16
+#define DT_ISP_LAST (DT_ISP8031 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2555,26 +2580,30 @@ struct qla_hw_data {
#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA81XX(ha) (IS_QLA8001(ha))
#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
+#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
+#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
-#define IS_QLA81XX(ha) (IS_QLA8001(ha))
-#define IS_QLA8XXX_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha))
+#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
+ IS_QLA8031(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA82XX(ha))
-#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
- (ha)->flags.msix_enabled)
-#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
-#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
+ IS_QLA82XX(ha) || IS_QLA83XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
+#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -2583,6 +2612,8 @@ struct qla_hw_data {
#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
+#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
+#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -2621,10 +2652,6 @@ struct qla_hw_data {
void *sfp_data;
dma_addr_t sfp_data_dma;
- uint8_t *edc_data;
- dma_addr_t edc_data_dma;
- uint16_t edc_data_len;
-
#define XGMAC_DATA_SIZE 4096
void *xgmac_data;
dma_addr_t xgmac_data_dma;
@@ -2653,6 +2680,8 @@ struct qla_hw_data {
void *async_pd;
dma_addr_t async_pd_dma;
+ void *swl;
+
/* These are used by mailbox operations. */
volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -2674,6 +2703,8 @@ struct qla_hw_data {
uint16_t fw_minor_version;
uint16_t fw_subminor_version;
uint16_t fw_attributes;
+ uint16_t fw_attributes_h;
+ uint16_t fw_attributes_ext[2];
uint32_t fw_memory_size;
uint32_t fw_transfer_size;
uint32_t fw_srisc_address;
@@ -2851,7 +2882,6 @@ typedef struct scsi_qla_host {
volatile struct {
uint32_t init_done :1;
uint32_t online :1;
- uint32_t rscn_queue_overflow :1;
uint32_t reset_active :1;
uint32_t management_server_logged_in :1;
@@ -2905,11 +2935,6 @@ typedef struct scsi_qla_host {
- /* RSCN queue. */
- uint32_t rscn_queue[MAX_RSCN_COUNT];
- uint8_t rscn_in_ptr;
- uint8_t rscn_out_ptr;
-
/* Timeout timers. */
uint8_t loop_down_abort_time; /* port down timer */
atomic_t loop_down_timer; /* loop down timer */
@@ -3005,7 +3030,6 @@ typedef struct scsi_qla_host {
#define QLA_ABORTED 0x105
#define QLA_SUSPENDED 0x106
#define QLA_BUSY 0x107
-#define QLA_RSCNS_HANDLED 0x108
#define QLA_ALREADY_REGISTERED 0x109
#define NVRAM_DELAY() udelay(10)
@@ -3021,6 +3045,7 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_25XX 0x200000
#define OPTROM_SIZE_81XX 0x400000
#define OPTROM_SIZE_82XX 0x800000
+#define OPTROM_SIZE_83XX 0x1000000
#define OPTROM_BURST_SIZE 0x1000
#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
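
For reference, a minimal sketch (not part of the patch) of how the unified srb_t and its new accessor macros are meant to be used once struct srb_ctx is folded in; the example_* helpers and the SCSI_SENSE_BUFFERSIZE usage are illustrative assumptions:

static void example_attach_scsi_cmd(srb_t *sp, struct scsi_cmnd *cmd)
{
	sp->type = SRB_SCSI_CMD;		/* new SRB type introduced above */
	SET_CMD_SP(sp, cmd);			/* expands to sp->u.scmd.cmd = cmd */
	SET_CMD_SENSE_PTR(sp, cmd->sense_buffer);
	SET_CMD_SENSE_LEN(sp, SCSI_SENSE_BUFFERSIZE);
}

static struct scsi_cmnd *example_lookup_scsi_cmd(srb_t *sp)
{
	return GET_CMD_SP(sp);			/* replaces direct sp->cmd accesses */
}
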
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0b4c2b794c6f..499c74e39ee5 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto out;
if (!ha->fce)
goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index aa69486dc064..6d7d7758c797 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1327,6 +1327,11 @@ struct qla_flt_header {
#define FLT_REG_GOLD_FW 0x2f
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
+#define FLT_REG_FCOE_FW 0xA4
+#define FLT_REG_FCOE_VPD_0 0xA9
+#define FLT_REG_FCOE_NVRAM_0 0xAA
+#define FLT_REG_FCOE_VPD_1 0xAB
+#define FLT_REG_FCOE_NVRAM_1 0xAC
struct qla_flt_region {
uint32_t code;
@@ -1494,6 +1499,11 @@ struct access_chip_rsp_84xx {
#define MBC_GET_XGMAC_STATS 0x7a
#define MBC_GET_DCBX_PARAMS 0x51
+/*
+ * ISP83xx mailbox commands
+ */
+#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
+
/* Flash access control option field bit definitions */
#define FAC_OPT_FORCE_SEMAPHORE BIT_15
#define FAC_OPT_REQUESTOR_ID BIT_14
@@ -1875,4 +1885,7 @@ struct qla_fcp_prio_cfg {
#define FA_NPIV_CONF0_ADDR_81 0xD1000
#define FA_NPIV_CONF1_ADDR_81 0xD2000
+/* 83XX Flash locations -- occupies second 8MB region. */
+#define FA_FLASH_LAYOUT_ADDR_83 0xFC400
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 408679be8fdf..9f065804bd12 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -71,8 +71,6 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
- struct srb_iocb *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
@@ -156,8 +154,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
-extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
-
+extern void qla2x00_sp_free_dma(void *, void *);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -205,8 +202,7 @@ extern int
qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
extern int
-qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
- uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
+qla2x00_get_fw_version(scsi_qla_host_t *);
extern int
qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
@@ -371,6 +367,9 @@ qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *);
extern int
qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
+extern int
+qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -409,8 +408,10 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *);
extern int qla24xx_beacon_on(struct scsi_qla_host *);
extern int qla24xx_beacon_off(struct scsi_qla_host *);
extern void qla24xx_beacon_blink(struct scsi_qla_host *);
+extern void qla83xx_beacon_blink(struct scsi_qla_host *);
extern int qla82xx_beacon_on(struct scsi_qla_host *);
extern int qla82xx_beacon_off(struct scsi_qla_host *);
+extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t);
extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t);
@@ -541,6 +542,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
+extern void qla2x00_sp_free(void *, void *);
+extern void qla2x00_sp_timeout(unsigned long);
+extern void qla2x00_bsg_job_done(void *, void *, int);
+extern void qla2x00_bsg_sp_free(void *, void *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
@@ -576,6 +581,8 @@ extern void qla82xx_start_iocbs(scsi_qla_host_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
+extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
+extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
@@ -589,6 +596,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
+/* 83xx related functions */
+extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+
/* Minidump related functions */
extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
extern int qla82xx_md_get_template(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4aea4ae23300..3128f80441f5 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -240,6 +240,12 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
return (rval);
}
+static inline int
+qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
+{
+ return vha->hw->max_fibre_devices * 4 + 16;
+}
+
/**
* qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
* @ha: HA context
@@ -261,20 +267,21 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_gid_pt_data *gid_data;
struct qla_hw_data *ha = vha->hw;
+ uint16_t gid_pt_rsp_size;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gid_pt(vha, list);
gid_data = NULL;
-
+ gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
/* Issue GID_PT */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
- GID_PT_RSP_SIZE);
+ gid_pt_rsp_size);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
- GID_PT_RSP_SIZE);
+ gid_pt_rsp_size);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_type */
@@ -292,7 +299,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
rval = QLA_FUNCTION_FAILED;
} else {
/* Set port IDs in switch info list. */
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
gid_data = &ct_rsp->rsp.gid_pt.entries[i];
list[i].d_id.b.domain = gid_data->port_id[0];
list[i].d_id.b.area = gid_data->port_id[1];
@@ -313,7 +320,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
* single call. Return a failed status, and let GA_NXT handle
* the overload.
*/
- if (i == MAX_FIBRE_DEVICES)
+ if (i == ha->max_fibre_devices)
rval = QLA_FUNCTION_FAILED;
}
@@ -330,7 +337,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
uint16_t i;
ms_iocb_entry_t *ms_pkt;
@@ -341,7 +348,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gpn_id(vha, list);
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GPN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
@@ -364,9 +371,11 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2056,
"GPN_ID issue IOCB failed (%d).\n", rval);
+ break;
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ break;
} else {
/* Save portname */
memcpy(list[i].port_name,
@@ -391,7 +400,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
uint16_t i;
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
@@ -401,7 +410,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gnn_id(vha, list);
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GNN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
@@ -424,9 +433,11 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2057,
"GNN_ID issue IOCB failed (%d).\n", rval);
+ break;
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GNN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ break;
} else {
/* Save nodename */
memcpy(list[i].node_name,
@@ -735,7 +746,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
static int
qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- int rval;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
struct sns_cmd_pkt *sns_cmd;
@@ -814,11 +825,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
uint16_t i;
uint8_t *entry;
struct sns_cmd_pkt *sns_cmd;
+ uint16_t gid_pt_sns_data_size;
+
+ gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
/* Issue GID_PT. */
/* Prepare SNS command request. */
sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
- GID_PT_SNS_DATA_SIZE);
+ gid_pt_sns_data_size);
/* Prepare SNS command arguments -- port_type. */
sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
@@ -839,7 +853,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
rval = QLA_FUNCTION_FAILED;
} else {
/* Set port IDs in switch info list. */
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
entry = &sns_cmd->p.gid_data[(i * 4) + 16];
list[i].d_id.b.domain = entry[1];
list[i].d_id.b.area = entry[2];
@@ -858,7 +872,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
* single call. Return a failed status, and let GA_NXT handle
* the overload.
*/
- if (i == MAX_FIBRE_DEVICES)
+ if (i == ha->max_fibre_devices)
rval = QLA_FUNCTION_FAILED;
}
@@ -877,12 +891,12 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
static int
qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
uint16_t i;
struct sns_cmd_pkt *sns_cmd;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GPN_ID */
/* Prepare SNS command request. */
sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
@@ -933,12 +947,12 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
static int
qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
uint16_t i;
struct sns_cmd_pkt *sns_cmd;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GNN_ID */
/* Prepare SNS command request. */
sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
@@ -1107,20 +1121,26 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
static int
qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
{
- int ret;
+ int ret, rval;
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
ret = QLA_SUCCESS;
if (vha->flags.management_server_logged_in)
return ret;
- ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
- mb, BIT_1|BIT_0);
- if (mb[0] != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_disc, vha, 0x2024,
- "Failed management_server login: loopid=%x mb[0]=%x "
- "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
- vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
+ rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
+ 0xfa, mb, BIT_1|BIT_0);
+ if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+ if (rval == QLA_MEMORY_ALLOC_FAILED)
+ ql_dbg(ql_dbg_disc, vha, 0x2085,
+ "Failed management_server login: loopid=%x "
+ "rval=%d\n", vha->mgmt_svr_loop_id, rval);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2024,
+ "Failed management_server login: loopid=%x "
+ "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
+ mb[7]);
ret = QLA_FUNCTION_FAILED;
} else
vha->flags.management_server_logged_in = 1;
@@ -1547,7 +1567,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_10GB);
else if (IS_QLA25XX(ha))
@@ -1594,6 +1614,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
break;
+ case PORT_SPEED_16GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ break;
default:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1724,7 +1748,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
int
qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
uint16_t i;
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
@@ -1734,7 +1758,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
@@ -1757,9 +1781,11 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2023,
"GFPN_ID issue IOCB failed (%d).\n", rval);
+ break;
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ break;
} else {
/* Save fabric portname */
memcpy(list[i].fabric_port_name,
@@ -1846,7 +1872,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
if (rval)
return rval;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
@@ -1947,7 +1973,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct qla_hw_data *ha = vha->hw;
uint8_t fcp_scsi_features = 0;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Set default FC4 Type as UNKNOWN so the default is to
* Process this port */
list[i].fc4_type = FC4_TYPE_UNKNOWN;
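
With the fixed GID_PT_RSP_SIZE define retired, every name-server scan and buffer in this file is bounded by the per-HBA ha->max_fibre_devices. A small sketch of the sizing rules involved (illustrative only; the example_* names are not part of the driver):

static inline int example_gid_pt_rsp_size(struct qla_hw_data *ha)
{
	/* 16-byte CT header plus one 4-byte port-ID entry per possible device */
	return ha->max_fibre_devices * 4 + 16;
}

static inline size_t example_swl_bytes(struct qla_hw_data *ha)
{
	/* the switch-info scratch list (ha->swl) is sized by the same limit */
	return ha->max_fibre_devices * sizeof(sw_info_t);
}
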
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1fa067e053d2..b9465643396b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -29,7 +29,6 @@ static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
-static int qla2x00_device_resync(scsi_qla_host_t *);
static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
uint16_t *);
@@ -41,11 +40,10 @@ static int qla25xx_init_queues(struct qla_hw_data *);
/* SRB Extensions ---------------------------------------------------------- */
-static void
-qla2x00_ctx_sp_timeout(unsigned long __data)
+void
+qla2x00_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
- struct srb_ctx *ctx;
struct srb_iocb *iocb;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
@@ -55,79 +53,25 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
spin_lock_irqsave(&ha->hardware_lock, flags);
req = ha->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
- ctx = sp->ctx;
- iocb = ctx->u.iocb_cmd;
+ iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- iocb->free(sp);
+ sp->free(fcport->vha, sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void
-qla2x00_ctx_sp_free(srb_t *sp)
+void
+qla2x00_sp_free(void *data, void *ptr)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *iocb = ctx->u.iocb_cmd;
- struct scsi_qla_host *vha = sp->fcport->vha;
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- kfree(iocb);
- kfree(ctx);
- mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+ mempool_free(sp, vha->hw->srb_mempool);
QLA_VHA_MARK_NOT_BUSY(vha);
}
-inline srb_t *
-qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
- unsigned long tmo)
-{
- srb_t *sp = NULL;
- struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx;
- struct srb_iocb *iocb;
- uint8_t bail;
-
- QLA_VHA_MARK_BUSY(vha, bail);
- if (bail)
- return NULL;
-
- sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
- if (!sp)
- goto done;
- ctx = kzalloc(size, GFP_KERNEL);
- if (!ctx) {
- mempool_free(sp, ha->srb_mempool);
- sp = NULL;
- goto done;
- }
- iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
- if (!iocb) {
- mempool_free(sp, ha->srb_mempool);
- sp = NULL;
- kfree(ctx);
- goto done;
- }
-
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->ctx = ctx;
- ctx->iocbs = 1;
- ctx->u.iocb_cmd = iocb;
- iocb->free = qla2x00_ctx_sp_free;
-
- init_timer(&iocb->timer);
- if (!tmo)
- goto done;
- iocb->timer.expires = jiffies + tmo * HZ;
- iocb->timer.data = (unsigned long)sp;
- iocb->timer.function = qla2x00_ctx_sp_timeout;
- add_timer(&iocb->timer);
-done:
- if (!sp)
- QLA_VHA_MARK_NOT_BUSY(vha);
- return sp;
-}
-
/* Asynchronous Login/Logout Routines -------------------------------------- */
static inline unsigned long
@@ -149,19 +93,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
}
static void
-qla2x00_async_iocb_timeout(srb_t *sp)
+qla2x00_async_iocb_timeout(void *data)
{
+ srb_t *sp = (srb_t *)data;
fc_port_t *fcport = sp->fcport;
- struct srb_ctx *ctx = sp->ctx;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
"Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
- ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+ sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
- if (ctx->type == SRB_LOGIN_CMD) {
- struct srb_iocb *lio = ctx->u.iocb_cmd;
+ if (sp->type == SRB_LOGIN_CMD) {
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
@@ -173,14 +117,16 @@ qla2x00_async_iocb_timeout(srb_t *sp)
}
static void
-qla2x00_async_login_ctx_done(srb_t *sp)
+qla2x00_async_login_sp_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
-
- qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- lio->free(sp);
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
}
int
@@ -188,22 +134,21 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_LOGIN_CMD;
- ctx->name = "login";
- lio = ctx->u.iocb_cmd;
+ sp->type = SRB_LOGIN_CMD;
+ sp->name = "login";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
- lio->done = qla2x00_async_login_ctx_done;
+ sp->done = qla2x00_async_login_sp_done;
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -219,42 +164,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- lio->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
static void
-qla2x00_async_logout_ctx_done(srb_t *sp)
+qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
-
- qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- lio->free(sp);
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_LOGOUT_CMD;
- ctx->name = "logout";
- lio = ctx->u.iocb_cmd;
+ sp->type = SRB_LOGOUT_CMD;
+ sp->name = "logout";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
- lio->done = qla2x00_async_logout_ctx_done;
+ sp->done = qla2x00_async_logout_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
@@ -266,20 +212,22 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- lio->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
static void
-qla2x00_async_adisc_ctx_done(srb_t *sp)
+qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
-
- qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- lio->free(sp);
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
}
int
@@ -287,22 +235,21 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_ADISC_CMD;
- ctx->name = "adisc";
- lio = ctx->u.iocb_cmd;
+ sp->type = SRB_ADISC_CMD;
+ sp->name = "adisc";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
- lio->done = qla2x00_async_adisc_ctx_done;
+ sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
@@ -316,46 +263,62 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- lio->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
static void
-qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
+qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ uint32_t flags;
+ uint16_t lun;
+ int rval;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+ flags = iocb->u.tmf.flags;
+ lun = (uint16_t)iocb->u.tmf.lun;
+
+ /* Issue Marker IOCB */
+ rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
+ vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+ flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
- qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
- iocb->free(sp);
+ if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
+ }
+ }
+ sp->free(sp->fcport->vha, sp);
}
int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *tcf;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_TM_CMD;
- ctx->name = "tmf";
- tcf = ctx->u.iocb_cmd;
- tcf->u.tmf.flags = flags;
+ sp->type = SRB_TM_CMD;
+ sp->name = "tmf";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ tcf = &sp->u.iocb_cmd;
+ tcf->u.tmf.flags = tm_flags;
tcf->u.tmf.lun = lun;
tcf->u.tmf.data = tag;
tcf->timeout = qla2x00_async_iocb_timeout;
- tcf->done = qla2x00_async_tm_cmd_ctx_done;
+ sp->done = qla2x00_async_tm_cmd_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -368,7 +331,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
return rval;
done_free_sp:
- tcf->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
@@ -387,6 +350,13 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
* requests.
*/
rval = qla2x00_get_port_database(vha, fcport, 0);
+ if (rval == QLA_NOT_LOGGED_IN) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
if (rval != QLA_SUCCESS) {
qla2x00_post_async_logout_work(vha, fcport, NULL);
qla2x00_post_async_login_work(vha, fcport, NULL);
@@ -452,30 +422,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
return;
}
-void
-qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- struct srb_iocb *iocb)
-{
- int rval;
- uint32_t flags;
- uint16_t lun;
-
- flags = iocb->u.tmf.flags;
- lun = (uint16_t)iocb->u.tmf.lun;
-
- /* Issue Marker IOCB */
- rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
- vha->hw->rsp_q_map[0], fcport->loop_id, lun,
- flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
-
- if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
- ql_dbg(ql_dbg_taskm, vha, 0x8030,
- "TM IOCB failed (%x).\n", rval);
- }
-
- return;
-}
-
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@@ -969,6 +915,9 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
uint16_t mb[4] = {0x1010, 0, 1, 0};
+ if (!IS_QLA81XX(vha->hw))
+ return QLA_SUCCESS;
+
return qla81xx_write_mpi_register(vha, mb);
}
@@ -1262,7 +1211,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA81XX(ha))
+ if (IS_QLA83XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
else if (IS_QLA25XX(ha))
fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
@@ -1270,10 +1221,20 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
- if (ha->mqenable)
- mq_size = sizeof(struct qla2xxx_mq_chain);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
/* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto try_eft;
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
@@ -1484,17 +1445,8 @@ enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
if (IS_QLA82XX(ha))
qla82xx_check_md_needed(vha);
- else {
- rval = qla2x00_get_fw_version(vha,
- &ha->fw_major_version,
- &ha->fw_minor_version,
- &ha->fw_subminor_version,
- &ha->fw_attributes,
- &ha->fw_memory_size,
- ha->mpi_version,
- &ha->mpi_capabilities,
- ha->phy_version);
- }
+ else
+ rval = qla2x00_get_fw_version(vha);
if (rval != QLA_SUCCESS)
goto failed;
ha->flags.npiv_supported = 0;
@@ -1535,6 +1487,9 @@ enable_82xx_npiv:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ if (IS_QLA83XX(ha))
+ goto skip_fac_check;
+
if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
uint32_t size;
@@ -1547,6 +1502,11 @@ enable_82xx_npiv:
"Unsupported FAC firmware (%d.%02d.%02d).\n",
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
+skip_fac_check:
+ if (IS_QLA83XX(ha)) {
+ ha->flags.fac_supported = 0;
+ rval = QLA_SUCCESS;
+ }
}
}
failed:
@@ -1725,7 +1685,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
-/* Setup ring parameters in initialization control block. */
+ /* Setup ring parameters in initialization control block. */
icb = (struct init_cb_24xx *)ha->init_cb;
icb->request_q_outpointer = __constant_cpu_to_le16(0);
icb->response_q_inpointer = __constant_cpu_to_le16(0);
@@ -1736,7 +1696,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
- if (ha->mqenable) {
+ if (ha->mqenable || IS_QLA83XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
@@ -1756,7 +1716,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
__constant_cpu_to_le32(BIT_18);
/* Use Disable MSIX Handshake mode for capable adapters */
- if (IS_MSIX_NACK_CAPABLE(ha)) {
+ if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
+ (ha->flags.msix_enabled)) {
icb->firmware_options_2 &=
__constant_cpu_to_le32(~BIT_22);
ha->flags.disable_msix_handshake = 1;
@@ -1800,7 +1761,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct rsp_que *rsp;
- struct scsi_qla_host *vp;
struct mid_init_cb_24xx *mid_init_cb =
(struct mid_init_cb_24xx *) ha->init_cb;
@@ -1831,11 +1791,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
}
spin_lock(&ha->vport_slock);
- /* Clear RSCN queue. */
- list_for_each_entry(vp, &ha->vp_list, list) {
- vp->rscn_in_ptr = 0;
- vp->rscn_out_ptr = 0;
- }
spin_unlock(&ha->vport_slock);
@@ -2028,7 +1983,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
&loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
if (rval != QLA_SUCCESS) {
if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
- IS_QLA8XXX_TYPE(ha) ||
+ IS_CNA_CAPABLE(ha) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
ql_dbg(ql_dbg_disc, vha, 0x2008,
"Loop is in a transition state.\n");
@@ -2120,7 +2075,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
uint16_t index;
struct qla_hw_data *ha = vha->hw;
int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_QLA8XXX_TYPE(ha);
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@@ -2596,13 +2551,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
if (ha->current_topology == ISP_CFG_FL &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
- vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
} else if (ha->current_topology == ISP_CFG_F &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
- vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
clear_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -2612,7 +2565,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (!vha->flags.online ||
(test_bit(ABORT_ISP_ACTIVE, &flags))) {
- vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
}
@@ -2622,8 +2574,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2015,
"Loop resync needed, failing.\n");
rval = QLA_FUNCTION_FAILED;
- }
- else
+ } else
rval = qla2x00_configure_local_loop(vha);
}
@@ -2662,8 +2613,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
if (test_bit(RSCN_UPDATE, &save_flags)) {
set_bit(RSCN_UPDATE, &vha->dpc_flags);
- if (!IS_ALOGIO_CAPABLE(ha))
- vha->flags.rscn_queue_overflow = 1;
}
}
@@ -2699,7 +2648,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
found_devs = 0;
new_fcport = NULL;
- entries = MAX_FIBRE_DEVICES;
+ entries = MAX_FIBRE_DEVICES_LOOP;
ql_dbg(ql_dbg_disc, vha, 0x2016,
"Getting FCAL position map.\n");
@@ -2707,7 +2656,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
qla2x00_get_fcal_position_map(vha, NULL);
/* Get list of logged in devices. */
- memset(ha->gid_list, 0, GID_LIST_SIZE);
+ memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
&entries);
if (rval != QLA_SUCCESS)
@@ -2971,7 +2920,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
- int rval, rval2;
+ int rval;
fc_port_t *fcport, *fcptemp;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
@@ -2995,12 +2944,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
vha->device_flags |= SWITCH_FOUND;
- /* Mark devices that need re-synchronization. */
- rval2 = qla2x00_device_resync(vha);
- if (rval2 == QLA_RSCNS_HANDLED) {
- /* No point doing the scan, just continue. */
- return (QLA_SUCCESS);
- }
do {
/* FDMI support. */
if (ql2xfdmienable &&
@@ -3012,8 +2955,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
loop_id = NPH_SNS;
else
loop_id = SIMPLE_NAME_SERVER;
- ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
- 0xfc, mb, BIT_1 | BIT_0);
+ rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
+ 0xfc, mb, BIT_1|BIT_0);
+ if (rval != QLA_SUCCESS) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return rval;
+ }
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
"Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
@@ -3044,6 +2991,13 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
@@ -3059,7 +3013,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3184,20 +3139,21 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
rval = QLA_SUCCESS;
/* Try GID_PT to get device list, else GAN. */
- swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
+ if (!ha->swl)
+ ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
+ GFP_KERNEL);
+ swl = ha->swl;
if (!swl) {
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2054,
"GID_PT allocations failed, fallback on GA_NXT.\n");
} else {
+ memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
- kfree(swl);
swl = NULL;
} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
- kfree(swl);
swl = NULL;
} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
- kfree(swl);
swl = NULL;
} else if (ql2xiidmaenable &&
qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
@@ -3215,7 +3171,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (new_fcport == NULL) {
ql_log(ql_log_warn, vha, 0x205e,
"Failed to allocate memory for fcport.\n");
- kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
@@ -3332,6 +3287,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+
found++;
/* Update port state. */
@@ -3368,6 +3325,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
fcport->flags |= FCF_LOGIN_NEEDED;
if (fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ (fcport->flags & FCF_ASYNC_SENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3390,14 +3348,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (new_fcport == NULL) {
ql_log(ql_log_warn, vha, 0x2066,
"Memory allocation failed for fcport.\n");
- kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
new_fcport->d_id.b24 = nxt_d_id.b24;
}
- kfree(swl);
kfree(new_fcport);
return (rval);
@@ -3470,6 +3426,9 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
/* If not in use then it is free to use. */
if (!found) {
+ ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+ "Assigning new loopid=%x, portid=%x.\n",
+ dev->loop_id, dev->d_id.b24);
break;
}
@@ -3488,110 +3447,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
}
/*
- * qla2x00_device_resync
- * Marks devices in the database that needs resynchronization.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Context:
- * Kernel context.
- */
-static int
-qla2x00_device_resync(scsi_qla_host_t *vha)
-{
- int rval;
- uint32_t mask;
- fc_port_t *fcport;
- uint32_t rscn_entry;
- uint8_t rscn_out_iter;
- uint8_t format;
- port_id_t d_id = {};
-
- rval = QLA_RSCNS_HANDLED;
-
- while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
- vha->flags.rscn_queue_overflow) {
-
- rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
- format = MSB(MSW(rscn_entry));
- d_id.b.domain = LSB(MSW(rscn_entry));
- d_id.b.area = MSB(LSW(rscn_entry));
- d_id.b.al_pa = LSB(LSW(rscn_entry));
-
- ql_dbg(ql_dbg_disc, vha, 0x2020,
- "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
- vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
- d_id.b.al_pa);
-
- vha->rscn_out_ptr++;
- if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
- vha->rscn_out_ptr = 0;
-
- /* Skip duplicate entries. */
- for (rscn_out_iter = vha->rscn_out_ptr;
- !vha->flags.rscn_queue_overflow &&
- rscn_out_iter != vha->rscn_in_ptr;
- rscn_out_iter = (rscn_out_iter ==
- (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
-
- if (rscn_entry != vha->rscn_queue[rscn_out_iter])
- break;
-
- ql_dbg(ql_dbg_disc, vha, 0x2021,
- "Skipping duplicate RSCN queue entry found at "
- "[%d].\n", rscn_out_iter);
-
- vha->rscn_out_ptr = rscn_out_iter;
- }
-
- /* Queue overflow, set switch default case. */
- if (vha->flags.rscn_queue_overflow) {
- ql_dbg(ql_dbg_disc, vha, 0x2022,
- "device_resync: rscn overflow.\n");
-
- format = 3;
- vha->flags.rscn_queue_overflow = 0;
- }
-
- switch (format) {
- case 0:
- mask = 0xffffff;
- break;
- case 1:
- mask = 0xffff00;
- break;
- case 2:
- mask = 0xff0000;
- break;
- default:
- mask = 0x0;
- d_id.b24 = 0;
- vha->rscn_out_ptr = vha->rscn_in_ptr;
- break;
- }
-
- rval = QLA_SUCCESS;
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->d_id.b24 & mask) != d_id.b24 ||
- fcport->port_type == FCT_BROADCAST)
- continue;
-
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
- if (format != 3 ||
- fcport->port_type != FCT_INITIATOR) {
- qla2x00_mark_device_lost(vha, fcport,
- 0, 0);
- }
- }
- }
- }
- return (rval);
-}
-
-/*
* qla2x00_fabric_dev_login
* Login fabric target device and update FC port database.
*
@@ -3644,6 +3499,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
} else {
qla2x00_update_fcport(vha, fcport);
}
+ } else {
+ /* Retry Login. */
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
}
return (rval);
@@ -3684,9 +3542,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Login fcport on switch. */
- ha->isp_ops->fabric_login(vha, fcport->loop_id,
+ rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb, BIT_0);
+ if (rval != QLA_SUCCESS) {
+ return rval;
+ }
if (mb[0] == MBS_PORT_ID_USED) {
/*
* Device has another loop ID. The firmware team
@@ -4100,15 +3961,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
- if (IS_QLA81XX(ha))
- qla2x00_get_fw_version(vha,
- &ha->fw_major_version,
- &ha->fw_minor_version,
- &ha->fw_subminor_version,
- &ha->fw_attributes, &ha->fw_memory_size,
- ha->mpi_version, &ha->mpi_capabilities,
- ha->phy_version);
-
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ qla2x00_get_fw_version(vha);
if (ha->fce) {
ha->flags.fce_enabled = 1;
memset(ha->fce, 0,
@@ -4974,7 +4828,6 @@ try_blob_fw:
ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
ha->flags.running_gold_fw = 1;
-
return rval;
}
@@ -5009,6 +4862,7 @@ int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
+ int rval2;
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
@@ -5033,12 +4887,18 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
vha->flags.management_server_logged_in = 0;
/* Login to SNS first */
- ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
- if (mb[0] != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_init, vha, 0x0103,
- "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
- "mb[6]=%x mb[7]=%x.\n",
- NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
+ rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
+ BIT_1);
+ if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+ if (rval2 == QLA_MEMORY_ALLOC_FAILED)
+ ql_dbg(ql_dbg_init, vha, 0x0120,
+ "Failed SNS login: loop_id=%x, rval2=%d\n",
+ NPH_SNS, rval2);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x0103,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+ "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
return (QLA_FUNCTION_FAILED);
}
@@ -5214,10 +5074,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->reset_delay = 5;
nv->max_luns_per_target = __constant_cpu_to_le16(128);
nv->port_down_retry_count = __constant_cpu_to_le16(30);
- nv->link_down_timeout = __constant_cpu_to_le16(30);
+ nv->link_down_timeout = __constant_cpu_to_le16(180);
nv->enode_mac[0] = 0x00;
- nv->enode_mac[1] = 0x02;
- nv->enode_mac[2] = 0x03;
+ nv->enode_mac[1] = 0xC0;
+ nv->enode_mac[2] = 0xDD;
nv->enode_mac[3] = 0x04;
nv->enode_mac[4] = 0x05;
nv->enode_mac[5] = 0x06 + ha->port_no;
@@ -5248,9 +5108,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
- icb->enode_mac[0] = 0x01;
- icb->enode_mac[1] = 0x02;
- icb->enode_mac[2] = 0x03;
+ icb->enode_mac[0] = 0x00;
+ icb->enode_mac[1] = 0xC0;
+ icb->enode_mac[2] = 0xDD;
icb->enode_mac[3] = 0x04;
icb->enode_mac[4] = 0x05;
icb->enode_mac[5] = 0x06 + ha->port_no;
@@ -5353,6 +5213,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (ql2xloginretrycount)
ha->login_retry_count = ql2xloginretrycount;
+ /* if not running MSI-X we need handshaking on interrupts */
+ if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+ icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+
/* Enable ZIO. */
if (!vha->flags.init_done) {
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
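
With the RSCN queue and qla2x00_device_resync() removed, stale fabric ports are now detected with a mark-and-sweep pass over vha->vp_fcports using the new fcport->scan_state field. A condensed, illustrative paraphrase of that flow (the example_* wrapper is hypothetical):

static void example_fabric_mark_and_sweep(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;
	LIST_HEAD(new_fcports);

	/* Mark: assume every known fabric port may have disappeared. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->scan_state = QLA_FCPORT_SCAN;

	/* Scan: ports reported by the fabric are flipped to QLA_FCPORT_FOUND. */
	qla2x00_find_all_fabric_devs(vha, &new_fcports);

	/* Sweep: ports never rediscovered, yet still online, are marked lost. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if ((fcport->flags & FCF_FABRIC_DEVICE) &&
		    fcport->scan_state == QLA_FCPORT_SCAN &&
		    atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(vha, fcport,
			    ql2xplogiabsentdevice, 0);
	}
}
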
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 7cc4f36cd539..6e457643c639 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -72,16 +72,19 @@ static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
struct dsd_dma *dsd_ptr, *tdsd_ptr;
+ struct crc_context *ctx;
+
+ ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
/* clean up allocated prev pool */
list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
- &((struct crc_context *)sp->ctx)->dsd_list, list) {
+ &ctx->dsd_list, list) {
dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
dsd_ptr->dsd_list_dma);
list_del(&dsd_ptr->list);
kfree(dsd_ptr);
}
- INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
+ INIT_LIST_HEAD(&ctx->dsd_list);
}
static inline void
@@ -113,8 +116,7 @@ qla2x00_hba_err_chk_enabled(srb_t *sp)
return 0;
*
*/
-
- switch (scsi_get_prot_op(sp->cmd)) {
+ switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
if (ql2xenablehba_err_chk >= 1)
@@ -144,3 +146,44 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
+
+static inline srb_t *
+qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+{
+ srb_t *sp = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (unlikely(bail))
+ return NULL;
+
+ sp = mempool_alloc(ha->srb_mempool, flag);
+ if (!sp)
+ goto done;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+done:
+ if (!sp)
+ QLA_VHA_MARK_NOT_BUSY(vha);
+ return sp;
+}
+
+static inline void
+qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+ init_timer(&sp->u.iocb_cmd.timer);
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+ sp->u.iocb_cmd.timer.data = (unsigned long)sp;
+ sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
+ add_timer(&sp->u.iocb_cmd.timer);
+ sp->free = qla2x00_sp_free;
+}
+
+static inline int
+qla2x00_gid_list_size(struct qla_hw_data *ha)
+{
+ return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+}
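
The helpers above define the lifecycle every asynchronous SRB now follows; a minimal sketch paraphrasing qla2x00_async_login() from the qla_init.c hunks (illustrative only, the example_* name is hypothetical):

static int example_issue_async_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);	/* marks the vha busy */
	if (!sp)
		return rval;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;		/* completion callback */

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		sp->free(vha, sp);	/* sp->free is set by qla2x00_init_timer() */
	return rval;
}
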
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 55a96761b5a4..eac950924497 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -22,18 +22,19 @@ static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cflags = 0;
/* Set transfer direction */
- if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(sp->cmd);
- } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(sp->cmd);
+ scsi_bufflen(cmd);
}
return (cflags);
}
@@ -143,12 +144,13 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
- uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ uint8_t guard = scsi_host_get_guard(cmd->device->host);
/* We only support T10 DIF right now */
if (guard != SHOST_DIX_GUARD_CRC) {
ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
- "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
+ "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
return 0;
}
@@ -156,7 +158,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
*fw_prot_opts = 0;
/* Translate SCSI opcode to a protection opcode */
- switch (scsi_get_prot_op(sp->cmd)) {
+ switch (scsi_get_prot_op(cmd)) {
case SCSI_PROT_READ_STRIP:
*fw_prot_opts |= PO_MODE_DIF_REMOVE;
break;
@@ -180,7 +182,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
}
- return scsi_prot_sg_count(sp->cmd);
+ return scsi_prot_sg_count(cmd);
}
/*
@@ -201,7 +203,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scatterlist *sg;
int i;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 2 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -259,7 +261,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scatterlist *sg;
int i;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -333,7 +335,7 @@ qla2x00_start_scsi(srb_t *sp)
vha = sp->fcport->vha;
ha = vha->hw;
reg = &ha->iobase->isp;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
req = ha->req_q_map[0];
rsp = ha->rsp_q_map[0];
/* So we know we haven't pci_map'ed anything yet */
@@ -391,7 +393,7 @@ qla2x00_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (cmd_entry_t *)req->ring_ptr;
@@ -403,7 +405,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Set target ID and LUN number*/
SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
- cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
+ cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
/* Update tagged queuing modifier */
if (scsi_populate_tag_msg(cmd, tag)) {
@@ -473,7 +475,6 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
if (IS_QLA82XX(ha)) {
qla82xx_start_iocbs(vha);
@@ -487,9 +488,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
+ if (ha->mqenable || IS_QLA83XX(ha)) {
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -609,7 +610,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
struct dsd_dma *dsd_ptr;
struct ct6_dsd *ctx;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -636,7 +637,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
}
cur_seg = scsi_sglist(cmd);
- ctx = sp->ctx;
+ ctx = GET_CMD_CTX_SP(sp);
while (tot_dsds) {
avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -725,7 +726,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
int i;
struct req_que *req;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -745,12 +746,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(sp->cmd);
+ scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(sp->cmd);
+ scsi_bufflen(cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -797,7 +798,7 @@ static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
unsigned int protcnt)
{
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
switch (scsi_get_prot_type(cmd)) {
@@ -952,16 +953,16 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
struct qla2_sgx sgx;
dma_addr_t sle_dma;
uint32_t sle_dma_len, tot_prot_dma_len = 0;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
prot_int = cmd->device->sector_size;
memset(&sgx, 0, sizeof(struct qla2_sgx));
- sgx.tot_bytes = scsi_bufflen(sp->cmd);
- sgx.cur_sg = scsi_sglist(sp->cmd);
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
sgx.sp = sp;
- sg_prot = scsi_prot_sglist(sp->cmd);
+ sg_prot = scsi_prot_sglist(cmd);
while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
@@ -995,7 +996,7 @@ alloc_and_fill:
}
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->ctx)->dsd_list);
+ &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@@ -1044,11 +1045,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
- scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
uint8_t *cp;
- scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
@@ -1078,7 +1080,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
}
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->ctx)->dsd_list);
+ &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@@ -1091,17 +1093,16 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
sle_dma = sg_dma_address(sg);
ql_dbg(ql_dbg_io, vha, 0x300a,
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
- i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
- sp->cmd);
+ i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
- if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
ql_dbg(ql_dbg_io, vha, 0x300b,
- "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
+ "User data buffer=%p for cmd=%p.\n", cp, cmd);
}
}
/* Null termination */
@@ -1128,8 +1129,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
uint8_t *cp;
-
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
@@ -1160,7 +1160,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->ctx)->dsd_list);
+ &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@@ -1171,7 +1171,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
ql_dbg(ql_dbg_io, vha, 0x3027,
"%s(): %p, sg_entry %d - "
"addr=0x%x0x%x, len=%d.\n",
@@ -1182,7 +1182,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
- if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
ql_dbg(ql_dbg_io, vha, 0x3028,
"%s(): Protection Data buffer = %p.\n", __func__,
@@ -1228,7 +1228,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
dma_addr_t crc_ctx_dma;
char tag[2];
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
sgc = 0;
/* Update entry type to indicate Command Type CRC_2 IOCB */
@@ -1256,15 +1256,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_READ_DATA);
}
- if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
- (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
- (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
- (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
bundling = 0;
/* Allocate CRC context from global pool */
- crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
- GFP_ATOMIC, &crc_ctx_dma);
+ crc_ctx_pkt = sp->u.scmd.ctx =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
if (!crc_ctx_pkt)
goto crc_queuing_error;
@@ -1310,7 +1310,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
fcp_cmnd->additional_cdb_len |= 2;
- int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
+ int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1345,7 +1345,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
blk_size = cmd->device->sector_size;
dif_bytes = (data_bytes / blk_size) * 8;
- switch (scsi_get_prot_op(sp->cmd)) {
+ switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
total_bytes = data_bytes;
@@ -1445,7 +1445,7 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t tot_dsds;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
char tag[2];
@@ -1510,7 +1510,7 @@ qla24xx_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
@@ -1529,7 +1529,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1611,7 +1611,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
uint16_t fw_prot_opts = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct cmd_type_crc_2 *cmd_pkt;
@@ -1728,7 +1728,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
/* Fill-in common area */
@@ -1744,7 +1744,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* Total Data and protection segment(s) */
@@ -1797,7 +1797,7 @@ queuing_error:
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct qla_hw_data *ha = sp->fcport->vha->hw;
int affinity = cmd->request->cpu;
@@ -1818,7 +1818,6 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
- struct srb_ctx *ctx;
pkt = NULL;
req_cnt = 1;
@@ -1848,15 +1847,13 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
sp->handle = handle;
/* Adjust entry-counts as needed. */
- if (sp->ctx) {
- ctx = sp->ctx;
- req_cnt = ctx->iocbs;
- }
+ if (sp->type != SRB_SCSI_CMD)
+ req_cnt = sp->iocbs;
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
- if (ha->mqenable)
+ if (ha->mqenable || IS_QLA83XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_QLA82XX(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -1889,8 +1886,7 @@ queuing_error:
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
@@ -1909,8 +1905,7 @@ static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
uint16_t opts;
mbx->entry_type = MBX_IOCB_TYPE;
@@ -1999,8 +1994,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
struct fc_port *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
struct req_que *req = vha->req;
flags = iocb->u.tmf.flags;
@@ -2027,7 +2021,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2041,7 +2035,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
els_iocb->opcode =
- (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
+ sp->type == SRB_ELS_CMD_RPT ?
bsg_job->request->rqst_data.r_els.els_code :
bsg_job->request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2078,7 +2072,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@@ -2155,7 +2149,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@@ -2245,12 +2239,12 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
- char tag[2];
+ char tag[2];
/* Setup device pointers. */
ret = 0;
reg = &ha->iobase->isp82;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
req = vha->req;
rsp = ha->rsp_q_map[0];
@@ -2354,12 +2348,14 @@ sufficient_dsds:
if (req->cnt < (req_cnt + 2))
goto queuing_error;
- ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!sp->ctx) {
+ ctx = sp->u.scmd.ctx =
+ mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!ctx) {
ql_log(ql_log_fatal, vha, 0x3010,
"Failed to allocate ctx for cmd=%p.\n", cmd);
goto queuing_error;
}
+
memset(ctx, 0, sizeof(struct ct6_dsd));
ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
GFP_ATOMIC, &ctx->fcp_cmnd_dma);
@@ -2410,12 +2406,12 @@ sufficient_dsds:
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
goto queuing_error_fcp_cmnd;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
if (cmd->sc_data_direction == DMA_TO_DEVICE)
@@ -2495,9 +2491,9 @@ sufficient_dsds:
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
- sizeof(cmd_pkt->lun));
+ sizeof(cmd_pkt->lun));
/*
* Update tagged queuing modifier -- default is TSK_SIMPLE (0).
@@ -2538,7 +2534,7 @@ sufficient_dsds:
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
wmb();
@@ -2584,9 +2580,9 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- if (sp->ctx) {
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
+ if (sp->u.scmd.ctx) {
+ mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
+ sp->u.scmd.ctx = NULL;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2599,7 +2595,6 @@ qla2x00_start_sp(srb_t *sp)
int rval;
struct qla_hw_data *ha = sp->fcport->vha->hw;
void *pkt;
- struct srb_ctx *ctx = sp->ctx;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
@@ -2612,7 +2607,7 @@ qla2x00_start_sp(srb_t *sp)
}
rval = QLA_SUCCESS;
- switch (ctx->type) {
+ switch (sp->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_login_iocb(sp, pkt) :
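[editor's sketch] The qla_iocb.c hunks above share one mechanical change: the SCSI command and its per-command context are no longer reached through sp->cmd and sp->ctx but through GET_CMD_SP(sp), sp->u.scmd.ctx, sp->u.iocb_cmd and sp->u.bsg_job, with sp->type taking over from the old srb_ctx type field. A minimal sketch of that accessor-over-union shape follows; all example_* names are illustrative assumptions, since the real srb_t layout lives in qla_def.h, outside this excerpt.

/*
 * Minimal sketch (assumption, not the driver's actual definitions) of the
 * accessor-over-union pattern used in the hunks above.
 */
#include <stdint.h>

struct example_scsi_cmnd;             /* stand-in for struct scsi_cmnd */

struct example_srb {
	uint8_t type;                 /* e.g. SRB_SCSI_CMD, SRB_LOGIN_CMD */
	int iocbs;                    /* IOCB count for non-SCSI SRBs */
	union {
		struct {
			struct example_scsi_cmnd *cmd; /* mid-layer command */
			void *ctx;                     /* CT6/CRC DSD context */
		} scmd;
		void *bsg_job;                         /* ELS/CT pass-through job */
	} u;
};

/* One accessor hides the union so every call site reads the same way. */
#define EXAMPLE_GET_CMD_SP(sp)	((sp)->u.scmd.cmd)

Keeping the type tag and the per-type data on the SRB itself is what lets qla2x00_alloc_iocbs() size requests from sp->iocbs when sp->type != SRB_SCSI_CMD, without the separate srb_ctx allocation the hunks remove.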
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 349843ea32f6..f79844ce7122 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -44,8 +44,8 @@ qla2100_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505d,
+ "%s: NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -141,8 +141,8 @@ qla2300_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x5058,
+ "%s: NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -289,7 +289,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
mb[cnt] = RD_REG_WORD(wptr);
ql_dbg(ql_dbg_async, vha, 0x5021,
- "Inter-Driver Commucation %s -- "
+ "Inter-Driver Communication %s -- "
"%04x %04x %04x %04x %04x %04x %04x.\n",
event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
mb[4], mb[5], mb[6]);
@@ -318,7 +318,7 @@ void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
+ static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
char *link_speed;
uint16_t handle_cnt;
uint16_t cnt, mbx;
@@ -328,12 +328,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
uint32_t rscn_entry, host_pid;
- uint8_t rscn_queue_index;
unsigned long flags;
/* Setup to process RIO completion. */
handle_cnt = 0;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
@@ -405,7 +404,8 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
+ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+ RD_REG_WORD(&reg24->mailbox7) : 0;
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
@@ -418,6 +418,7 @@ skip_rio:
"Unrecoverable Hardware Error: adapter "
"marked OFFLINE!\n");
vha->flags.online = 0;
+ vha->device_flags |= DFLG_DEV_FAILED;
} else {
/* Check to see if MPI timeout occurred */
if ((mbx & MBX_3) && (ha->flags.port0))
@@ -431,6 +432,7 @@ skip_rio:
"Unrecoverable Hardware Error: adapter marked "
"OFFLINE!\n");
vha->flags.online = 0;
+ vha->device_flags |= DFLG_DEV_FAILED;
} else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
@@ -482,10 +484,10 @@ skip_rio:
ha->link_data_rate = PORT_SPEED_1GB;
} else {
link_speed = link_speeds[LS_UNKNOWN];
- if (mb[1] < 5)
+ if (mb[1] < 6)
link_speed = link_speeds[mb[1]];
else if (mb[1] == 0x13)
- link_speed = link_speeds[5];
+ link_speed = link_speeds[6];
ha->link_data_rate = mb[1];
}
@@ -497,7 +499,8 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
- mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
+ mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ ? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
ql_dbg(ql_dbg_async, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
@@ -547,7 +550,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- if (IS_QLA8XXX_TYPE(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
ql_dbg(ql_dbg_async, vha, 0x500d,
"DCBX Completed -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
@@ -681,8 +684,6 @@ skip_rio:
qla2x00_mark_all_devices_lost(vha, 1);
- vha->flags.rscn_queue_overflow = 1;
-
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
break;
@@ -711,15 +712,6 @@ skip_rio:
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
- rscn_queue_index = vha->rscn_in_ptr + 1;
- if (rscn_queue_index == MAX_RSCN_COUNT)
- rscn_queue_index = 0;
- if (rscn_queue_index != vha->rscn_out_ptr) {
- vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
- vha->rscn_in_ptr = rscn_queue_index;
- } else {
- vha->flags.rscn_queue_overflow = 1;
- }
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -809,6 +801,10 @@ skip_rio:
case MBA_IDC_TIME_EXT:
qla81xx_idc_event(vha, mb[0], mb[1]);
break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x5057,
+ "Unknown AEN:%04x %04x %04x %04x\n",
+ mb[0], mb[1], mb[2], mb[3]);
}
if (!vha->vp_idx && ha->num_vhosts)
@@ -845,8 +841,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
req->outstanding_cmds[index] = NULL;
/* Save ISP completion status */
- sp->cmd->result = DID_OK << 16;
- qla2x00_sp_compl(ha, sp);
+ sp->done(ha, sp, DID_OK << 16);
} else {
ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
@@ -903,7 +898,6 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *lio;
- struct srb_ctx *ctx;
uint16_t *data;
uint16_t status;
@@ -911,9 +905,8 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
- ctx = sp->ctx;
- lio = ctx->u.iocb_cmd;
- type = ctx->name;
+ lio = &sp->u.iocb_cmd;
+ type = sp->name;
fcport = sp->fcport;
data = lio->u.logio.data;
@@ -937,7 +930,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
status = le16_to_cpu(mbx->status);
- if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
+ if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
@@ -948,7 +941,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
- if (ctx->type == SRB_LOGIN_CMD) {
+ if (sp->type == SRB_LOGIN_CMD) {
fcport->port_type = FCT_TARGET;
if (le16_to_cpu(mbx->mb1) & BIT_0)
fcport->port_type = FCT_INITIATOR;
@@ -979,7 +972,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb7));
logio_done:
- lio->done(sp);
+ sp->done(vha, sp, 0);
}
static void
@@ -988,29 +981,18 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "CT_IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
+ int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- sp_bsg = sp->ctx;
- bsg_job = sp_bsg->u.bsg_job;
+ bsg_job = sp->u.bsg_job;
- type = NULL;
- switch (sp_bsg->type) {
- case SRB_CT_CMD:
- type = "ct pass-through";
- break;
- default:
- ql_log(ql_log_warn, vha, 0x5047,
- "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
- return;
- }
+ type = "ct pass-through";
comp_status = le16_to_cpu(pkt->comp_status);
@@ -1022,7 +1004,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
@@ -1035,30 +1017,19 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
ql_log(ql_log_warn, vha, 0x5049,
"CT pass-through-%s error "
"comp_status-status=0x%x.\n", type, comp_status);
- bsg_job->reply->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
- if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
- kfree(sp->fcport);
-
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- bsg_job->job_done(bsg_job);
+ sp->done(vha, sp, res);
}
static void
@@ -1067,22 +1038,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "ELS_CT_IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
uint32_t fw_status[3];
uint8_t* fw_sts_ptr;
+ int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- sp_bsg = sp->ctx;
- bsg_job = sp_bsg->u.bsg_job;
+ bsg_job = sp->u.bsg_job;
type = NULL;
- switch (sp_bsg->type) {
+ switch (sp->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
type = "els";
@@ -1091,8 +1060,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "ct pass-through";
break;
default:
- ql_log(ql_log_warn, vha, 0x503e,
- "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
+ ql_dbg(ql_dbg_user, vha, 0x503e,
+ "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
return;
}
@@ -1108,11 +1077,11 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
+ le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
- ql_log(ql_log_info, vha, 0x503f,
+ ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
@@ -1122,7 +1091,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
else {
- ql_log(ql_log_info, vha, 0x5040,
+ ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
type, sp->handle, comp_status,
@@ -1130,32 +1099,21 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt)->error_subcode_1),
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
- bsg_job->reply->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
+ ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
(uint8_t *)pkt, sizeof(*pkt));
}
else {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
- (sp_bsg->type == SRB_CT_CMD))
- kfree(sp->fcport);
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- bsg_job->job_done(bsg_job);
+ sp->done(vha, sp, res);
}
static void
@@ -1167,7 +1125,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *lio;
- struct srb_ctx *ctx;
uint16_t *data;
uint32_t iop[2];
@@ -1175,9 +1132,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
- ctx = sp->ctx;
- lio = ctx->u.iocb_cmd;
- type = ctx->name;
+ lio = &sp->u.iocb_cmd;
+ type = sp->name;
fcport = sp->fcport;
data = lio->u.logio.data;
@@ -1185,7 +1141,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
- ql_log(ql_log_warn, vha, 0x5034,
+ ql_log(ql_log_warn, fcport->vha, 0x5034,
"Async-%s error entry - hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
type, sp->handle, fcport->d_id.b.domain,
@@ -1198,14 +1154,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
- ql_dbg(ql_dbg_async, vha, 0x5036,
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
"Async-%s complete - hdl=%x portid=%02x%02x%02x "
"iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
- if (ctx->type != SRB_LOGIN_CMD)
+ if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1239,7 +1195,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- ql_dbg(ql_dbg_async, vha, 0x5037,
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
"Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
"iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
@@ -1248,7 +1204,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[1]));
logio_done:
- lio->done(sp);
+ sp->done(vha, sp, 0);
}
static void
@@ -1260,7 +1216,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *iocb;
- struct srb_ctx *ctx;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
int error = 1;
@@ -1268,30 +1223,29 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
- ctx = sp->ctx;
- iocb = ctx->u.iocb_cmd;
- type = ctx->name;
+ iocb = &sp->u.iocb_cmd;
+ type = sp->name;
fcport = sp->fcport;
if (sts->entry_status) {
- ql_log(ql_log_warn, vha, 0x5038,
+ ql_log(ql_log_warn, fcport->vha, 0x5038,
"Async-%s error - hdl=%x entry-status(%x).\n",
type, sp->handle, sts->entry_status);
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- ql_log(ql_log_warn, vha, 0x5039,
+ ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
type, sp->handle, sts->comp_status);
} else if (!(le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
- ql_log(ql_log_warn, vha, 0x503a,
+ ql_log(ql_log_warn, fcport->vha, 0x503a,
"Async-%s error - hdl=%x no response info(%x).\n",
type, sp->handle, sts->scsi_status);
} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- ql_log(ql_log_warn, vha, 0x503b,
+ ql_log(ql_log_warn, fcport->vha, 0x503b,
"Async-%s error - hdl=%x not enough response(%d).\n",
type, sp->handle, sts->rsp_data_len);
} else if (sts->data[3]) {
- ql_log(ql_log_warn, vha, 0x503c,
+ ql_log(ql_log_warn, fcport->vha, 0x503c,
"Async-%s error - hdl=%x response(%x).\n",
type, sp->handle, sts->data[3]);
} else {
@@ -1304,7 +1258,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
(uint8_t *)sts, sizeof(*sts));
}
- iocb->done(sp);
+ sp->done(vha, sp, 0);
}
/**
@@ -1390,25 +1344,32 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
- uint32_t sense_len, struct rsp_que *rsp)
+ uint32_t sense_len, struct rsp_que *rsp, int res)
{
struct scsi_qla_host *vha = sp->fcport->vha;
- struct scsi_cmnd *cp = sp->cmd;
+ struct scsi_cmnd *cp = GET_CMD_SP(sp);
+ uint32_t track_sense_len;
if (sense_len >= SCSI_SENSE_BUFFERSIZE)
sense_len = SCSI_SENSE_BUFFERSIZE;
- sp->request_sense_length = sense_len;
- sp->request_sense_ptr = cp->sense_buffer;
- if (sp->request_sense_length > par_sense_len)
+ SET_CMD_SENSE_LEN(sp, sense_len);
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+ track_sense_len = sense_len;
+
+ if (sense_len > par_sense_len)
sense_len = par_sense_len;
memcpy(cp->sense_buffer, sense_data, sense_len);
- sp->request_sense_ptr += sense_len;
- sp->request_sense_length -= sense_len;
- if (sp->request_sense_length != 0)
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+ track_sense_len -= sense_len;
+ SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+ if (track_sense_len != 0) {
rsp->status_srb = sp;
+ cp->result = res;
+ }
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
@@ -1436,7 +1397,7 @@ static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
struct scsi_qla_host *vha = sp->fcport->vha;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t *ap = &sts24->data[12];
uint8_t *ep = &sts24->data[20];
uint32_t e_ref_tag, a_ref_tag;
@@ -1580,6 +1541,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
uint16_t que;
struct req_que *req;
int logit = 1;
+ int res = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -1619,7 +1581,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
qla2xxx_wake_dpc(vha);
return;
}
- cp = sp->cmd;
+ cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
@@ -1668,11 +1630,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
par_sense_len -= rsp_info_len;
}
if (rsp_info_len > 3 && rsp_info[3]) {
- ql_dbg(ql_dbg_io, vha, 0x3019,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
"FCP I/O protocol failure (0x%x/0x%x).\n",
rsp_info_len, rsp_info[3]);
- cp->result = DID_BUS_BUSY << 16;
+ res = DID_BUS_BUSY << 16;
goto out;
}
}
@@ -1689,7 +1651,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
case CS_COMPLETE:
case CS_QUEUE_FULL:
if (scsi_status == 0) {
- cp->result = DID_OK << 16;
+ res = DID_OK << 16;
break;
}
if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
@@ -1699,19 +1661,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_dbg(ql_dbg_io, vha, 0x301a,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
break;
}
}
- cp->result = DID_OK << 16 | lscsi_status;
+ res = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_dbg(ql_dbg_io, vha, 0x301b,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
"QUEUE FULL detected.\n");
break;
}
@@ -1724,7 +1686,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
- rsp);
+ rsp, res);
break;
case CS_DATA_UNDERRUN:
@@ -1733,36 +1695,36 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- ql_dbg(ql_dbg_io, vha, 0x301d,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
"Dropped frame(s) detected "
"(0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16 | lscsi_status;
+ res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_dbg(ql_dbg_io, vha, 0x301e,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
break;
}
} else {
- ql_dbg(ql_dbg_io, vha, 0x301f,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
"Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16 | lscsi_status;
+ res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
- cp->result = DID_OK << 16 | lscsi_status;
+ res = DID_OK << 16 | lscsi_status;
logit = 0;
check_scsi_status:
@@ -1772,7 +1734,7 @@ check_scsi_status:
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_dbg(ql_dbg_io, vha, 0x3020,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
"QUEUE FULL detected.\n");
logit = 1;
break;
@@ -1785,7 +1747,7 @@ check_scsi_status:
break;
qla2x00_handle_sense(sp, sense_data, par_sense_len,
- sense_len, rsp);
+ sense_len, rsp, res);
}
break;
@@ -1802,7 +1764,7 @@ check_scsi_status:
* while we try to recover so instruct the mid layer
* to requeue until the class decides how to handle this.
*/
- cp->result = DID_TRANSPORT_DISRUPTED << 16;
+ res = DID_TRANSPORT_DISRUPTED << 16;
if (comp_status == CS_TIMEOUT) {
if (IS_FWI2_CAPABLE(ha))
@@ -1812,7 +1774,7 @@ check_scsi_status:
break;
}
- ql_dbg(ql_dbg_io, vha, 0x3021,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
"Port down status: port-state=0x%x.\n",
atomic_read(&fcport->state));
@@ -1821,25 +1783,25 @@ check_scsi_status:
break;
case CS_ABORTED:
- cp->result = DID_RESET << 16;
+ res = DID_RESET << 16;
break;
case CS_DIF_ERROR:
logit = qla2x00_handle_dif_error(sp, sts24);
break;
default:
- cp->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
break;
}
out:
if (logit)
- ql_dbg(ql_dbg_io, vha, 0x3022,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) "
"nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
"cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
- comp_status, scsi_status, cp->result, vha->host_no,
+ comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
@@ -1848,7 +1810,7 @@ out:
resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
- qla2x00_sp_compl(ha, sp);
+ sp->done(ha, sp, res);
}
/**
@@ -1861,84 +1823,52 @@ out:
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
- uint8_t sense_sz = 0;
+ uint8_t sense_sz = 0;
struct qla_hw_data *ha = rsp->hw;
struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
- srb_t *sp = rsp->status_srb;
+ srb_t *sp = rsp->status_srb;
struct scsi_cmnd *cp;
+ uint32_t sense_len;
+ uint8_t *sense_ptr;
- if (sp != NULL && sp->request_sense_length != 0) {
- cp = sp->cmd;
- if (cp == NULL) {
- ql_log(ql_log_warn, vha, 0x3025,
- "cmd is NULL: already returned to OS (sp=%p).\n",
- sp);
+ if (!sp || !GET_CMD_SENSE_LEN(sp))
+ return;
- rsp->status_srb = NULL;
- return;
- }
+ sense_len = GET_CMD_SENSE_LEN(sp);
+ sense_ptr = GET_CMD_SENSE_PTR(sp);
- if (sp->request_sense_length > sizeof(pkt->data)) {
- sense_sz = sizeof(pkt->data);
- } else {
- sense_sz = sp->request_sense_length;
- }
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_log(ql_log_warn, vha, 0x3025,
+ "cmd is NULL: already returned to OS (sp=%p).\n", sp);
- /* Move sense data. */
- if (IS_FWI2_CAPABLE(ha))
- host_to_fcp_swap(pkt->data, sizeof(pkt->data));
- memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
- sp->request_sense_ptr, sense_sz);
-
- sp->request_sense_ptr += sense_sz;
- sp->request_sense_length -= sense_sz;
-
- /* Place command on done queue. */
- if (sp->request_sense_length == 0) {
- rsp->status_srb = NULL;
- qla2x00_sp_compl(ha, sp);
- }
+ rsp->status_srb = NULL;
+ return;
}
-}
-static int
-qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
-{
- struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx;
+ if (sense_len > sizeof(pkt->data))
+ sense_sz = sizeof(pkt->data);
+ else
+ sense_sz = sense_len;
- if (!sp->ctx)
- return 1;
+ /* Move sense data. */
+ if (IS_FWI2_CAPABLE(ha))
+ host_to_fcp_swap(pkt->data, sizeof(pkt->data));
+ memcpy(sense_ptr, pkt->data, sense_sz);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
+ sense_ptr, sense_sz);
- ctx = sp->ctx;
+ sense_len -= sense_sz;
+ sense_ptr += sense_sz;
- if (ctx->type == SRB_LOGIN_CMD ||
- ctx->type == SRB_LOGOUT_CMD ||
- ctx->type == SRB_TM_CMD) {
- ctx->u.iocb_cmd->done(sp);
- return 0;
- } else if (ctx->type == SRB_ADISC_CMD) {
- ctx->u.iocb_cmd->free(sp);
- return 0;
- } else {
- struct fc_bsg_job *bsg_job;
-
- bsg_job = ctx->u.bsg_job;
- if (ctx->type == SRB_ELS_CMD_HST ||
- ctx->type == SRB_CT_CMD)
- kfree(sp->fcport);
-
- bsg_job->reply->reply_data.ctels_reply.status =
- FC_CTELS_STATUS_OK;
- bsg_job->reply->result = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- bsg_job->job_done(bsg_job);
- return 0;
+ SET_CMD_SENSE_PTR(sp, sense_ptr);
+ SET_CMD_SENSE_LEN(sp, sense_len);
+
+ /* Place command on done queue. */
+ if (sense_len == 0) {
+ rsp->status_srb = NULL;
+ sp->done(ha, sp, cp->result);
}
- return 1;
}
/**
@@ -1953,53 +1883,34 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
struct qla_hw_data *ha = vha->hw;
const char func[] = "ERROR-IOCB";
uint16_t que = MSW(pkt->handle);
- struct req_que *req = ha->req_q_map[que];
-
- if (pkt->entry_status & RF_INV_E_ORDER)
- ql_dbg(ql_dbg_async, vha, 0x502a,
- "Invalid Entry Order.\n");
- else if (pkt->entry_status & RF_INV_E_COUNT)
- ql_dbg(ql_dbg_async, vha, 0x502b,
- "Invalid Entry Count.\n");
- else if (pkt->entry_status & RF_INV_E_PARAM)
- ql_dbg(ql_dbg_async, vha, 0x502c,
- "Invalid Entry Parameter.\n");
- else if (pkt->entry_status & RF_INV_E_TYPE)
- ql_dbg(ql_dbg_async, vha, 0x502d,
- "Invalid Entry Type.\n");
- else if (pkt->entry_status & RF_BUSY)
- ql_dbg(ql_dbg_async, vha, 0x502e,
- "Busy.\n");
- else
- ql_dbg(ql_dbg_async, vha, 0x502f,
- "UNKNOWN flag error.\n");
+ struct req_que *req = NULL;
+ int res = DID_ERROR << 16;
+
+ ql_dbg(ql_dbg_async, vha, 0x502a,
+ "type of error status in response: 0x%x\n", pkt->entry_status);
+
+ if (que >= ha->max_req_queues || !ha->req_q_map[que])
+ goto fatal;
+
+ req = ha->req_q_map[que];
+
+ if (pkt->entry_status & RF_BUSY)
+ res = DID_BUS_BUSY << 16;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- if (qla2x00_free_sp_ctx(vha, sp)) {
- if (pkt->entry_status &
- (RF_INV_E_ORDER | RF_INV_E_COUNT |
- RF_INV_E_PARAM | RF_INV_E_TYPE)) {
- sp->cmd->result = DID_ERROR << 16;
- } else if (pkt->entry_status & RF_BUSY) {
- sp->cmd->result = DID_BUS_BUSY << 16;
- } else {
- sp->cmd->result = DID_ERROR << 16;
- }
- qla2x00_sp_compl(ha, sp);
- }
- } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
- COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
- || pkt->entry_type == COMMAND_TYPE_6) {
- ql_log(ql_log_warn, vha, 0x5030,
- "Error entry - invalid handle.\n");
-
- if (IS_QLA82XX(ha))
- set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
- else
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ sp->done(ha, sp, res);
+ return;
}
+fatal:
+ ql_log(ql_log_warn, vha, 0x5030,
+ "Error entry - invalid handle/queue.\n");
+
+ if (IS_QLA82XX(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
}
/**
@@ -2127,7 +2038,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return;
rval = QLA_SUCCESS;
@@ -2168,7 +2079,7 @@ done:
}
/**
- * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
+ * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
* @irq:
* @dev_id: SCSI driver HA context
*
@@ -2192,8 +2103,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x5059,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2276,8 +2187,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505a,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2306,8 +2217,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2340,8 +2251,8 @@ qla24xx_msix_default(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505c,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2530,8 +2441,14 @@ msix_failed:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
- ha->mqenable = 1;
+ if (IS_QLA83XX(ha)) {
+ if (ha->msixbase && ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ ha->mqenable = 1;
+ } else
+ if (ha->mqiobase
+ && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ ha->mqenable = 1;
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
@@ -2552,8 +2469,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
- if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
- !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
+ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2615,7 +2532,7 @@ clear_risc_ints:
* FIXME: Noted that 8014s were being dropped during NK testing.
* Timing deltas during MSI-X/INTa transitions?
*/
- if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
goto fail;
spin_lock_irq(&ha->hardware_lock);
if (IS_FWI2_CAPABLE(ha)) {
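[editor's sketch] The qla_isr.c hunks above converge on a single completion hook: instead of the interrupt paths freeing bsg/ctx resources and calling qla2x00_sp_compl() or bsg_job->job_done() directly, every path now ends in sp->done(vha, sp, res). A small sketch of that callback shape follows; only the signature mirrors the diff, and the example_* names and teardown body are assumptions rather than the driver's code.

/*
 * Sketch of the sp->done() completion hook the interrupt paths above call.
 */
struct example_host;                  /* stand-in for the host/adapter handle */
struct example_srb;

typedef void (*example_done_t)(struct example_host *, struct example_srb *, int);

struct example_srb {
	uint32_t handle;
	example_done_t done;          /* installed when the SRB is built */
};

static void example_process_completion(struct example_host *vha,
				       struct example_srb *sp, int res)
{
	/*
	 * The ISR no longer needs to know whether sp carries a SCSI command,
	 * a login IOCB or a BSG job; the per-type teardown lives behind done().
	 */
	sp->done(vha, sp, res);
}

Centralizing teardown behind the callback is also what allows qla2x00_error_entry() above to collapse its per-type cleanup into a single sp->done(ha, sp, res) call once the result code has been chosen.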
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 08f1d01bdc1c..b4a23394a7bd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,17 +46,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
if (ha->pdev->error_state > pci_channel_io_frozen) {
- ql_log(ql_log_warn, base_vha, 0x1001,
+ ql_log(ql_log_warn, vha, 0x1001,
"error_state is greater than pci_channel_io_frozen, "
"exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
if (vha->device_flags & DFLG_DEV_FAILED) {
- ql_log(ql_log_warn, base_vha, 0x1002,
+ ql_log(ql_log_warn, vha, 0x1002,
"Device in failed state, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -69,7 +69,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.pci_channel_io_perm_failure) {
- ql_log(ql_log_warn, base_vha, 0x1003,
+ ql_log(ql_log_warn, vha, 0x1003,
"Perm failure on EEH timeout MBX, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -77,7 +77,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.isp82xx_fw_hung) {
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
- ql_log(ql_log_warn, base_vha, 0x1004,
+ ql_log(ql_log_warn, vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
return QLA_FUNCTION_TIMEOUT;
}
@@ -89,8 +89,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
*/
if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
- ql_log(ql_log_warn, base_vha, 0x1005,
- "Cmd access timeout, Exiting.\n");
+ ql_log(ql_log_warn, vha, 0x1005,
+ "Cmd access timeout, cmd=0x%x, Exiting.\n",
+ mcp->mb[0]);
return QLA_FUNCTION_TIMEOUT;
}
@@ -98,7 +99,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Save mailbox command for debug */
ha->mcp = mcp;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
+ ql_dbg(ql_dbg_mbx, vha, 0x1006,
"Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -127,28 +128,28 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
iptr++;
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
"Loaded MBX registers (displayed in bytes) =.\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
(uint8_t *)mcp->mb, 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
((uint8_t *)mcp->mb + 0x10), 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
((uint8_t *)mcp->mb + 0x20), 8);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
"I/O Address = %p.\n", optr);
- ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Unlock mbx registers and wait for interrupt */
- ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
+ ql_dbg(ql_dbg_mbx, vha, 0x100f,
"Going to unlock irq & waiting for interrupts. "
"jiffies=%lx.\n", jiffies);
@@ -163,7 +164,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
+ ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
@@ -180,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
} else {
- ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
+ ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
if (IS_QLA82XX(ha)) {
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
+ ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
@@ -214,7 +215,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command == MBC_LOAD_RISC_RAM_EXTENDED))
msleep(10);
} /* while */
- ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
+ ql_dbg(ql_dbg_mbx, vha, 0x1013,
"Waited %d sec.\n",
(uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
@@ -223,7 +224,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.mbox_int) {
uint16_t *iptr2;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
+ ql_dbg(ql_dbg_mbx, vha, 0x1014,
"Cmd=%x completed.\n", command);
/* Got interrupt. Clear the flag. */
@@ -236,7 +237,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
rval = QLA_FUNCTION_FAILED;
- ql_log(ql_log_warn, base_vha, 0x1015,
+ ql_log(ql_log_warn, vha, 0x1015,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
goto premature_exit;
}
@@ -268,13 +269,19 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x.\n", command);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
"iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
"mb[0] = 0x%x.\n", mb0);
- ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
+
+ /*
+ * Attempt to capture a firmware dump for further analysis
+ * of the current firmware state
+ */
+ ha->isp_ops->fw_dump(vha, 0);
rval = QLA_FUNCTION_TIMEOUT;
}
@@ -285,7 +292,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->mcp = NULL;
if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
- ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
+ ql_dbg(ql_dbg_mbx, vha, 0x101a,
"Checking for additional resp interrupt.\n");
/* polling mode for non isp_abort commands. */
@@ -297,7 +304,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
ha->flags.eeh_busy) {
/* not in dpc. schedule it for dpc to take over. */
- ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
+ ql_dbg(ql_dbg_mbx, vha, 0x101b,
"Timeout, schedule isp_abort_needed.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -313,15 +320,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101c,
- "Mailbox cmd timeout occured. "
- "Scheduling ISP abort eeh_busy=0x%x.\n",
- ha->flags.eeh_busy);
+ "Mailbox cmd timeout occured, cmd=0x%x, "
+ "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+ "abort.\n", command, mcp->mb[0],
+ ha->flags.eeh_busy);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
} else if (!abort_active) {
/* call abort directly since we are in the DPC thread */
- ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
+ ql_dbg(ql_dbg_mbx, vha, 0x101d,
"Timeout, calling abort_isp.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -337,9 +345,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101e,
- "Mailbox cmd timeout occured. "
- "Scheduling ISP abort.\n");
-
+ "Mailbox cmd timeout occured, cmd=0x%x, "
+ "mb[0]=0x%x. Scheduling ISP abort ",
+ command, mcp->mb[0]);
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* Allow next mbx cmd to come in. */
@@ -350,7 +358,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
&vha->dpc_flags);
}
clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
- ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
+ ql_dbg(ql_dbg_mbx, vha, 0x101f,
"Finished abort_isp.\n");
goto mbx_done;
}
@@ -364,8 +372,8 @@ premature_exit:
mbx_done:
if (rval) {
ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
- "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
- mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
+ "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
@@ -455,7 +463,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
struct nvram_81xx *nv = ha->nvram;
mcp->mb[4] = (nv->enhanced_features &
EXTENDED_BB_CREDITS);
@@ -508,21 +516,22 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
* Kernel context.
*/
int
-qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
- uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
- uint32_t *mpi_caps, uint8_t *phy)
+qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
+ if (IS_QLA83XX(vha->hw))
+ mcp->in_mb |= MBX_17|MBX_16|MBX_15;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -530,23 +539,37 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
goto failed;
/* Return mailbox data. */
- *major = mcp->mb[1];
- *minor = mcp->mb[2];
- *subminor = mcp->mb[3];
- *attributes = mcp->mb[6];
+ ha->fw_major_version = mcp->mb[1];
+ ha->fw_minor_version = mcp->mb[2];
+ ha->fw_subminor_version = mcp->mb[3];
+ ha->fw_attributes = mcp->mb[6];
if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
- *memory = 0x1FFFF; /* Defaults to 128KB. */
+ ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
else
- *memory = (mcp->mb[5] << 16) | mcp->mb[4];
- if (IS_QLA81XX(vha->hw)) {
- mpi[0] = mcp->mb[10] & 0xff;
- mpi[1] = mcp->mb[11] >> 8;
- mpi[2] = mcp->mb[11] & 0xff;
- *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
- phy[0] = mcp->mb[8] & 0xff;
- phy[1] = mcp->mb[9] >> 8;
- phy[2] = mcp->mb[9] & 0xff;
+ ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
+ ha->mpi_version[0] = mcp->mb[10] & 0xff;
+ ha->mpi_version[1] = mcp->mb[11] >> 8;
+ ha->mpi_version[2] = mcp->mb[11] & 0xff;
+ ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
+ ha->phy_version[0] = mcp->mb[8] & 0xff;
+ ha->phy_version[1] = mcp->mb[9] >> 8;
+ ha->phy_version[2] = mcp->mb[9] & 0xff;
+ }
+ if (IS_QLA83XX(ha)) {
+ if (mcp->mb[6] & BIT_15) {
+ ha->fw_attributes_h = mcp->mb[15];
+ ha->fw_attributes_ext[0] = mcp->mb[16];
+ ha->fw_attributes_ext[1] = mcp->mb[17];
+ ql_dbg(ql_dbg_mbx, vha, 0x1139,
+ "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
+ __func__, mcp->mb[15], mcp->mb[6]);
+ } else
+ ql_dbg(ql_dbg_mbx, vha, 0x112f,
+ "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
+ __func__, mcp->mb[6]);
}
+
failed:
if (rval != QLA_SUCCESS) {
/*EMPTY*/
@@ -859,6 +882,7 @@ qla2x00_abort_command(srb_t *sp)
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
@@ -881,7 +905,7 @@ qla2x00_abort_command(srb_t *sp)
mcp->mb[1] = fcport->loop_id << 8;
mcp->mb[2] = (uint16_t)handle;
mcp->mb[3] = (uint16_t)(handle >> 16);
- mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
+ mcp->mb[6] = (uint16_t)cmd->device->lun;
mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -1028,7 +1052,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA8XXX_TYPE(vha->hw))
+ if (IS_CNA_CAPABLE(vha->hw))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -1052,7 +1076,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
- if (IS_QLA8XXX_TYPE(vha->hw)) {
+ if (IS_CNA_CAPABLE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
vha->fcoe_fcf_idx = mcp->mb[10];
vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1163,7 +1187,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
+ if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
mcp->mb[1] = BIT_0;
mcp->mb[10] = MSW(ha->ex_init_cb_dma);
mcp->mb[11] = LSW(ha->ex_init_cb_dma);
@@ -1172,7 +1196,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->mb[14] = sizeof(*ha->ex_init_cb);
mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
}
- mcp->in_mb = MBX_0;
+ /* 1 and 2 should normally be captured. */
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA83XX(ha))
+ /* mb3 is additional info about the installed SFP. */
+ mcp->in_mb |= MBX_3;
mcp->buf_size = size;
mcp->flags = MBX_DMA_OUT;
mcp->tov = MBX_TOV_SECONDS;
@@ -1181,7 +1209,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x104d,
- "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
} else {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
@@ -1260,6 +1289,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
goto gpd_error_out;
if (IS_FWI2_CAPABLE(ha)) {
+ uint64_t zero = 0;
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
@@ -1273,6 +1303,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
goto gpd_error_out;
}
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd24->port_name, 8))) {
+ /* We lost the device midway. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
/* Names are little-endian. */
memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
@@ -1289,6 +1327,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
else
fcport->port_type = FCT_TARGET;
} else {
+ uint64_t zero = 0;
+
/* Check for logged in state. */
if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
@@ -1301,6 +1341,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
goto gpd_error_out;
}
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device midway. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
/* Names are little-endian. */
memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
@@ -1481,7 +1529,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
- if (IS_QLA8XXX_TYPE(vha->hw)) {
+ if (IS_CNA_CAPABLE(vha->hw)) {
/* Logout across all FCFs. */
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_1;
@@ -1622,7 +1670,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->port_id[1] = area;
lg->port_id[2] = domain;
lg->vp_index = vha->vp_idx;
- rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
+ rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+ (ha->r_a_tov / 10 * 2) + 2);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1063,
"Failed to issue login IOCB (%x).\n", rval);
@@ -1885,8 +1934,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->port_id[1] = area;
lg->port_id[2] = domain;
lg->vp_index = vha->vp_idx;
-
- rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
+ rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+ (ha->r_a_tov / 10 * 2) + 2);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x106f,
"Failed to issue logout IOCB (%x).\n", rval);
@@ -2094,7 +2143,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
mcp->in_mb |= MBX_12;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -2121,7 +2170,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
*orig_iocb_cnt = mcp->mb[10];
if (vha->hw->flags.npiv_supported && max_npiv_vports)
*max_npiv_vports = mcp->mb[11];
- if (IS_QLA81XX(vha->hw) && max_fcfs)
+ if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
*max_fcfs = mcp->mb[12];
}
@@ -2686,7 +2735,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
- if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
+ !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -2828,7 +2878,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_QLA8XXX_TYPE(vha->hw))
+ if (IS_CNA_CAPABLE(vha->hw))
mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
else
mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3298,6 +3348,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->mb[12] = req->qos;
mcp->mb[11] = req->vp_idx;
mcp->mb[13] = req->rid;
+ if (IS_QLA83XX(ha))
+ mcp->mb[15] = 0;
reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
QLA_QUE_PAGE * req->id);
@@ -3311,12 +3363,21 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->flags = MBX_DMA_OUT;
- mcp->tov = 60;
+ mcp->tov = MBX_TOV_SECONDS * 2;
+
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ mcp->in_mb |= MBX_1;
+ if (IS_QLA83XX(ha)) {
+ mcp->out_mb |= MBX_15;
+ /* debug q create issue in SR-IOV */
+ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+ }
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ if (!IS_QLA83XX(ha))
+ WRT_REG_DWORD(&reg->req_q_out, 0);
}
req->req_q_in = &reg->req_q_in;
req->req_q_out = &reg->req_q_out;
@@ -3354,6 +3415,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[5] = rsp->length;
mcp->mb[14] = rsp->msix->entry;
mcp->mb[13] = rsp->rid;
+ if (IS_QLA83XX(ha))
+ mcp->mb[15] = 0;
reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
QLA_QUE_PAGE * rsp->id);
@@ -3367,12 +3430,23 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->flags = MBX_DMA_OUT;
- mcp->tov = 60;
+ mcp->tov = MBX_TOV_SECONDS * 2;
+
+ if (IS_QLA81XX(ha)) {
+ mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+ } else if (IS_QLA83XX(ha)) {
+ mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+ /* debug q create issue in SR-IOV */
+ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+ }
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
WRT_REG_DWORD(&reg->rsp_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
+ if (!IS_QLA83XX(ha))
+ WRT_REG_DWORD(&reg->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3424,7 +3498,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3454,7 +3528,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
@@ -3486,7 +3560,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
@@ -3641,7 +3715,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_XGMAC_STATS;
@@ -3680,7 +3754,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_DCBX_PARAMS;
@@ -3775,7 +3849,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA8XXX_TYPE(vha->hw))
+ if (IS_CNA_CAPABLE(vha->hw))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3813,7 +3887,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
- if (IS_QLA8XXX_TYPE(ha)) {
+ if (IS_CNA_CAPABLE(ha)) {
mcp->mb[1] |= BIT_15;
mcp->mb[2] = vha->fcoe_fcf_idx;
}
@@ -3831,13 +3905,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -3976,6 +4051,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
return rval;
}
+
int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
@@ -3993,6 +4069,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->mb[1] = 0;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA83XX(ha))
+ mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -4018,7 +4096,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_PORT_CONFIG;
mcp->out_mb = MBX_0;
@@ -4299,6 +4377,90 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
}
int
+qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_SET_LED_CONFIG;
+ mcp->mb[1] = led_cfg[0];
+ mcp->mb[2] = led_cfg[1];
+ if (IS_QLA8031(ha)) {
+ mcp->mb[3] = led_cfg[2];
+ mcp->mb[4] = led_cfg[3];
+ mcp->mb[5] = led_cfg[4];
+ mcp->mb[6] = led_cfg[5];
+ }
+
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA8031(ha))
+ mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1134,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_GET_LED_CONFIG;
+
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA8031(ha))
+ mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1137,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ led_cfg[0] = mcp->mb[1];
+ led_cfg[1] = mcp->mb[2];
+ if (IS_QLA8031(ha)) {
+ led_cfg[2] = mcp->mb[3];
+ led_cfg[3] = mcp->mb[4];
+ led_cfg[4] = mcp->mb[5];
+ led_cfg[5] = mcp->mb[6];
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
{
int rval;
@@ -4321,7 +4483,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
mcp->out_mb = MBX_7|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -4335,3 +4497,75 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
return rval;
}
+
+int
+qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA83XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+ mcp->mb[1] = LSW(reg);
+ mcp->mb[2] = MSW(reg);
+ mcp->mb[3] = LSW(data);
+ mcp->mb[4] = MSW(data);
+ mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1131,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1132,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x113b,
+ "Implicit LOGO Unsupported.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+
+ ql_dbg(ql_dbg_mbx, vha, 0x113c, "Entered %s.\n", __func__);
+
+ /* Perform Implicit LOGO. */
+ mcp->mb[0] = MBC_PORT_LOGOUT;
+ mcp->mb[1] = fcport->loop_id;
+ mcp->mb[10] = BIT_15;
+ mcp->out_mb = MBX_10|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ ql_dbg(ql_dbg_mbx, vha, 0x113d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ else
+ ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+
+ return rval;
+}
+
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f488cc69fc79..aa062a1b0ca4 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -479,7 +479,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->max_channel = MAX_BUSES - 1;
host->max_lun = ql2xmaxlun;
host->unique_id = host->host_no;
- host->max_id = MAX_TARGETS_2200;
+ host->max_id = ha->max_fibre_devices;
host->transportt = qla2xxx_transport_vport_template;
ql_dbg(ql_dbg_vport, vha, 0xa007,
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 270ba3130fde..f0528539bbbc 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -908,27 +908,37 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
return 0;
}
+int
+qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
+{
+ uint32_t off_value, rval = 0;
+
+ WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
+ (off & 0xFFFF0000));
+
+ /* Read back value to make sure write has gone through */
+ RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ off_value = (off & 0x0000FFFF);
+
+ if (flag)
+ WRT_REG_DWORD((void *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
+ data);
+ else
+ rval = RD_REG_DWORD((void *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+
+ return rval;
+}
+
static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ /* Dword reads to flash. */
+ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
+ *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE +
+ (addr & 0x0000FFFF), 0, 0);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
- qla82xx_wait_rom_busy(ha);
- if (qla82xx_wait_rom_done(ha)) {
- ql_log(ql_log_fatal, vha, 0x00ba,
- "Error waiting for rom done.\n");
- return -1;
- }
- /* Reset abyte_cnt and dummy_byte_cnt */
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- udelay(10);
- cond_resched();
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
- *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
return 0;
}
@@ -2040,8 +2050,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0xb054,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -3136,12 +3146,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
fw_minor_version = ha->fw_minor_version;
fw_subminor_version = ha->fw_subminor_version;
- rval = qla2x00_get_fw_version(vha, &ha->fw_major_version,
- &ha->fw_minor_version, &ha->fw_subminor_version,
- &ha->fw_attributes, &ha->fw_memory_size,
- ha->mpi_version, &ha->mpi_capabilities,
- ha->phy_version);
-
+ rval = qla2x00_get_fw_version(vha);
if (rval != QLA_SUCCESS)
return rval;
@@ -3150,7 +3155,6 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
if (fw_major_version != ha->fw_major_version ||
fw_minor_version != ha->fw_minor_version ||
fw_subminor_version != ha->fw_subminor_version) {
-
ql_log(ql_log_info, vha, 0xb02d,
"Firmware version differs "
"Previous version: %d:%d:%d - "
@@ -3614,7 +3618,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- if (!sp->ctx ||
+ if (!sp->u.scmd.ctx ||
(sp->flags & SRB_FCP_CMND_DMA_VALID)) {
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
@@ -3645,29 +3649,6 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
}
/* Minidump related functions */
-int
-qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
-{
- uint32_t off_value, rval = 0;
-
- WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
- (off & 0xFFFF0000));
-
- /* Read back value to make sure write has gone through */
- RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
- off_value = (off & 0x0000FFFF);
-
- if (flag)
- WRT_REG_DWORD((void *)
- (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
- data);
- else
- rval = RD_REG_DWORD((void *)
- (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
-
- return rval;
-}
-
static int
qla82xx_minidump_process_control(scsi_qla_host_t *vha,
qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
@@ -4117,8 +4098,9 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
data_ptr = (uint32_t *)ha->md_dump;
if (ha->fw_dumped) {
- ql_log(ql_log_info, vha, 0xb037,
- "Firmware dump available to retrive\n");
+ ql_log(ql_log_warn, vha, 0xb037,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n", ha->fw_dump);
goto md_failed;
}
@@ -4161,7 +4143,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
total_data_size = ha->md_dump_size;
- ql_dbg(ql_log_info, vha, 0xb03d,
+ ql_dbg(ql_dbg_p3p, vha, 0xb03d,
"Total minidump data_size 0x%x to be captured\n", total_data_size);
/* Check whether template obtained is valid */
@@ -4284,7 +4266,7 @@ skip_nxt_entry:
}
if (data_collected != total_data_size) {
- ql_dbg(ql_log_warn, vha, 0xb043,
+ ql_dbg(ql_dbg_p3p, vha, 0xb043,
"MiniDump data mismatch: Data collected: [0x%x],"
"total_data_size:[0x%x]\n",
data_collected, total_data_size);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57a226be339a..4ac50e274661 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -888,7 +888,8 @@ struct ct6_dsd {
};
#define MBC_TOGGLE_INTERRUPT 0x10
-#define MBC_SET_LED_CONFIG 0x125
+#define MBC_SET_LED_CONFIG 0x125 /* FCoE specific LED control */
+#define MBC_GET_LED_CONFIG 0x126 /* FCoE specific LED control */
/* Flash offset */
#define FLT_REG_BOOTLOAD_82XX 0x72
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 036030c95339..a2f999273a5f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -304,7 +304,6 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
-static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
@@ -559,28 +558,75 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
return str;
}
-static inline srb_t *
-qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
- struct scsi_cmnd *cmd)
+void
+qla2x00_sp_free_dma(void *vha, void *ptr)
{
- srb_t *sp;
- struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
- sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
- if (!sp) {
- ql_log(ql_log_warn, vha, 0x3006,
- "Memory allocation failed for sp.\n");
- return sp;
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
}
- atomic_set(&sp->ref_count, 1);
- sp->fcport = fcport;
- sp->cmd = cmd;
- sp->flags = 0;
- CMD_SP(cmd) = (void *)sp;
- sp->ctx = NULL;
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
- return sp;
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List is assured to have elements */
+ qla2x00_clean_dsd_pool(ha, sp);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ ctx1 = NULL;
+ }
+
+ CMD_SP(cmd) = NULL;
+ mempool_free(sp, ha->srb_mempool);
+}
+
+static void
+qla2x00_sp_compl(void *data, void *ptr, int res)
+{
+ struct qla_hw_data *ha = (struct qla_hw_data *)data;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ BUG();
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2x00_sp_free_dma(ha, sp);
+ cmd->scsi_done(cmd);
}
static int
@@ -644,10 +690,17 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
- sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
+ sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2x00_sp_free_dma;
+ sp->done = qla2x00_sp_compl;
+
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io, vha, 0x3013,
@@ -658,8 +711,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_sp_free_dma(ha, sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -893,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- qla2x00_sp_compl(ha, sp);
+ sp->done(ha, sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Did the command return during mailbox execution? */
@@ -925,6 +977,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
srb_t *sp;
+ struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
@@ -935,28 +988,29 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
- if ((sp->ctx) && !IS_PROT_IO(sp))
+ if (sp->type != SRB_SCSI_CMD)
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
match = 0;
+ cmd = GET_CMD_SP(sp);
switch (type) {
case WAIT_HOST:
match = 1;
break;
case WAIT_TARGET:
- match = sp->cmd->device->id == t;
+ match = cmd->device->id == t;
break;
case WAIT_LUN:
- match = (sp->cmd->device->id == t &&
- sp->cmd->device->lun == l);
+ match = (cmd->device->id == t &&
+ cmd->device->lun == l);
break;
}
if (!match)
continue;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- status = qla2x00_eh_wait_on_command(sp->cmd);
+ status = qla2x00_eh_wait_on_command(cmd);
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1219,7 +1273,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
- if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
+ if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -1249,7 +1303,6 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
int que, cnt;
unsigned long flags;
srb_t *sp;
- struct srb_ctx *ctx;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
@@ -1262,31 +1315,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (!sp->ctx ||
- (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
- IS_PROT_IO(sp)) {
- sp->cmd->result = res;
- qla2x00_sp_compl(ha, sp);
- } else {
- ctx = sp->ctx;
- if (ctx->type == SRB_ELS_CMD_RPT ||
- ctx->type == SRB_ELS_CMD_HST ||
- ctx->type == SRB_CT_CMD) {
- struct fc_bsg_job *bsg_job =
- ctx->u.bsg_job;
- if (bsg_job->request->msgcode
- == FC_BSG_HST_CT)
- kfree(sp->fcport);
- bsg_job->req->errors = 0;
- bsg_job->reply->result = res;
- bsg_job->job_done(bsg_job);
- kfree(sp->ctx);
- mempool_free(sp,
- ha->srb_mempool);
- } else {
- ctx->u.iocb_cmd->free(sp);
- }
- }
+ sp->done(vha, sp, res);
}
}
}
@@ -1488,9 +1517,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
uint16_t msix;
int cpus;
- if (IS_QLA82XX(ha))
- return qla82xx_iospace_config(ha);
-
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
@@ -1593,6 +1619,96 @@ iospace_error_exit:
}
+static int
+qla83xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint16_t msix;
+ int cpus;
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
+ "Invalid PCI mem region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* 64bit PCI BAR - BAR2 will correspond to region 4 */
+ /* 83XX 26XX always use MQ type access for queues
+ * - mbar 2, a.k.a region 4 */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
+ pci_resource_len(ha->pdev, 4));
+
+ if (!ha->mqiobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
+ "BAR2/region4 not enabled\n");
+ goto mqiobase_exit;
+ }
+
+ ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
+ pci_resource_len(ha->pdev, 2));
+ if (ha->msixbase) {
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev,
+ QLA_83XX_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x011c,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x011e,
+ "BAR 1 not enabled.\n");
+
+mqiobase_exit:
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
@@ -1769,7 +1885,7 @@ static struct isp_operations qla81xx_isp_ops = {
.fw_dump = qla81xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
- .beacon_blink = qla24xx_beacon_blink,
+ .beacon_blink = qla83xx_beacon_blink,
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
@@ -1815,6 +1931,43 @@ static struct isp_operations qla82xx_isp_ops = {
.iospace_config = qla82xx_iospace_config,
};
+static struct isp_operations qla83xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla83xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla83xx_iospace_config,
+};
+
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
@@ -1909,6 +2062,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
/* Initialize 82XX ISP flags */
qla82xx_init_flags(ha);
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2031:
+ ha->device_type |= DT_ISP2031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8031:
+ ha->device_type |= DT_ISP8031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
@@ -1966,7 +2135,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
char pci_info[30];
char fw_str[30];
struct scsi_host_template *sht;
- int bars, max_id, mem_only = 0;
+ int bars, mem_only = 0;
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
@@ -1980,7 +2149,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2020,9 +2191,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
qla2x00_set_isp_flags(ha);
/* Set EEH reset type to fundamental if required by hba */
- if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
pdev->needs_freset = 1;
- }
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
@@ -2030,9 +2200,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->optrom_size = OPTROM_SIZE_2300;
/* Assign ISP specific operations. */
- max_id = MAX_TARGETS_2200;
if (IS_QLA2100(ha)) {
- max_id = MAX_TARGETS_2100;
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
req_length = REQUEST_ENTRY_CNT_2100;
rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2044,6 +2213,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA2200(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
req_length = REQUEST_ENTRY_CNT_2200;
rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2055,6 +2225,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA23XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_2200;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2068,6 +2239,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2300_isp_ops;
} else if (IS_QLA24XX_TYPE(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2082,6 +2254,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA25XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2096,6 +2269,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA81XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2110,6 +2284,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
} else if (IS_QLA82XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_82XX;
rsp_length = RESPONSE_ENTRY_CNT_82XX;
@@ -2123,14 +2298,31 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA83XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla83xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
}
+
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
"mbx_count=%d, req_length=%d, "
"rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
- "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
+ "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
+ "max_fibre_devices=%d.\n",
ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
- ha->nvram_npiv_size);
+ ha->nvram_npiv_size, ha->max_fibre_devices);
ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
"isp_ops=%p, flash_conf_off=%d, "
"flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
@@ -2204,7 +2396,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- host->max_id = max_id;
+ host->max_id = ha->max_fibre_devices;
host->this_id = 255;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
@@ -2251,7 +2443,7 @@ que_init:
req->req_q_out = &ha->iobase->isp24.req_q_out;
rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
- if (ha->mqenable) {
+ if (ha->mqenable || IS_QLA83XX(ha)) {
req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2552,6 +2744,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (ha->mqiobase)
iounmap(ha->mqiobase);
+
+ if (IS_QLA83XX(ha) && ha->msixbase)
+ iounmap(ha->msixbase);
}
pci_release_selected_regions(ha->pdev, ha->bars);
@@ -2751,8 +2946,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->init_cb)
goto fail;
- ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
- &ha->gid_list_dma, GFP_KERNEL);
+ ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
goto fail_free_init_cb;
@@ -2893,7 +3088,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_QLA8XXX_TYPE(ha)) {
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
@@ -2967,7 +3162,8 @@ fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
- dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list,
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
@@ -3045,9 +3241,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->sfp_data)
dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
- if (ha->edc_data)
- dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
-
if (ha->ms_iocb)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
@@ -3062,8 +3255,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_pool_destroy(ha->s_dma_pool);
if (ha->gid_list)
- dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
- ha->gid_list_dma);
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list, ha->gid_list_dma);
if (IS_QLA82XX(ha)) {
if (!list_empty(&ha->gbl_dsd_list)) {
@@ -3095,6 +3288,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
vfree(ha->optrom_buffer);
kfree(ha->nvram);
kfree(ha->npiv_info);
+ kfree(ha->swl);
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
@@ -3661,75 +3855,6 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
}
}
-static void
-qla2x00_sp_free_dma(srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
-
- if (sp->flags & SRB_DMA_VALID) {
- scsi_dma_unmap(cmd);
- sp->flags &= ~SRB_DMA_VALID;
- }
-
- if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
- dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
- scsi_prot_sg_count(cmd), cmd->sc_data_direction);
- sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
- }
-
- if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
- /* List assured to be having elements */
- qla2x00_clean_dsd_pool(ha, sp);
- sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
- }
-
- if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- dma_pool_free(ha->dl_dma_pool, sp->ctx,
- ((struct crc_context *)sp->ctx)->crc_ctx_dma);
- sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
- }
-
- if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx = sp->ctx;
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
- ctx->fcp_cmnd_dma);
- list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx->dsd_use_cnt;
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
- }
-
- CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
-
- qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
- cmd->scsi_done(cmd);
-}
-
-void
-qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- if (atomic_read(&sp->ref_count) == 0) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
- "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
- sp, sp->cmd);
- if (ql2xextended_error_logging & ql_dbg_io)
- BUG();
- return;
- }
- if (!atomic_dec_and_test(&sp->ref_count))
- return;
- qla2x00_sp_final_compl(ha, sp);
-}
-
/**************************************************************************
* qla2x00_timer
*
@@ -3800,7 +3925,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
- if (sp->ctx && !IS_PROT_IO(sp))
+ if (sp->type != SRB_SCSI_CMD)
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3889,7 +4014,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 8
+#define FW_BLOBS 10
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -3898,6 +4023,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP25XX 5
#define FW_ISP81XX 6
#define FW_ISP82XX 7
+#define FW_ISP2031 8
+#define FW_ISP8031 9
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3907,6 +4034,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
#define FW_FILE_ISP82XX "ql8200_fw.bin"
+#define FW_FILE_ISP2031 "ql2600_fw.bin"
+#define FW_FILE_ISP8031 "ql8300_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -3919,6 +4048,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP25XX, },
{ .name = FW_FILE_ISP81XX, },
{ .name = FW_FILE_ISP82XX, },
+ { .name = FW_FILE_ISP2031, },
+ { .name = FW_FILE_ISP8031, },
};
struct fw_blob *
@@ -3927,7 +4058,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct fw_blob *blob;
- blob = NULL;
if (IS_QLA2100(ha)) {
blob = &qla_fw_blobs[FW_ISP21XX];
} else if (IS_QLA2200(ha)) {
@@ -3944,6 +4074,12 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP81XX];
} else if (IS_QLA82XX(ha)) {
blob = &qla_fw_blobs[FW_ISP82XX];
+ } else if (IS_QLA2031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2031];
+ } else if (IS_QLA8031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP8031];
+ } else {
+ return NULL;
}
mutex_lock(&qla_fw_lock);
@@ -4265,6 +4401,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ 0 },
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 16bc72844a97..3c13c0a6be63 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,6 +568,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
else if (IS_QLA82XX(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_82;
goto end;
+ } else if (IS_QLA83XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_83;
+ goto end;
}
/* Begin with first PCI expansion ROM header. */
buf = (uint8_t *)req->ring;
@@ -721,13 +724,22 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
le32_to_cpu(region->size));
switch (le32_to_cpu(region->code) & 0xff) {
+ case FLT_REG_FCOE_FW:
+ if (!IS_QLA8031(ha))
+ break;
+ ha->flt_region_fw = start;
+ break;
case FLT_REG_FW:
+ if (IS_QLA8031(ha))
+ break;
ha->flt_region_fw = start;
break;
case FLT_REG_BOOT_CODE:
ha->flt_region_boot = start;
break;
case FLT_REG_VPD_0:
+ if (IS_QLA8031(ha))
+ break;
ha->flt_region_vpd_nvram = start;
if (IS_QLA82XX(ha))
break;
@@ -735,16 +747,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
- if (IS_QLA82XX(ha))
+ if (IS_QLA82XX(ha) || IS_QLA8031(ha))
break;
if (!ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_NVRAM_0:
+ if (IS_QLA8031(ha))
+ break;
if (ha->flags.port0)
ha->flt_region_nvram = start;
break;
case FLT_REG_NVRAM_1:
+ if (IS_QLA8031(ha))
+ break;
if (!ha->flags.port0)
ha->flt_region_nvram = start;
break;
@@ -785,6 +801,31 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_VPD_82XX:
ha->flt_region_vpd = start;
break;
+ case FLT_REG_FCOE_VPD_0:
+ if (!IS_QLA8031(ha))
+ break;
+ ha->flt_region_vpd_nvram = start;
+ if (ha->flags.port0)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_FCOE_VPD_1:
+ if (!IS_QLA8031(ha))
+ break;
+ if (!ha->flags.port0)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_FCOE_NVRAM_0:
+ if (!IS_QLA8031(ha))
+ break;
+ if (ha->flags.port0)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_FCOE_NVRAM_1:
+ if (!IS_QLA8031(ha))
+ break;
+ if (!ha->flags.port0)
+ ha->flt_region_nvram = start;
+ break;
}
}
goto done;
@@ -804,15 +845,12 @@ no_flash_data:
def_npiv_conf0[def] : def_npiv_conf1[def];
done:
ql_dbg(ql_dbg_init, vha, 0x004a,
- "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
- loc, ha->flt_region_boot,
- ha->flt_region_fw, ha->flt_region_vpd_nvram,
- ha->flt_region_vpd);
- ql_dbg(ql_dbg_init, vha, 0x004b,
- "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
- ha->flt_region_nvram,
- ha->flt_region_fdt, ha->flt_region_flt,
- ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
+ "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x "
+ "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
+ loc, ha->flt_region_boot, ha->flt_region_fw,
+ ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram,
+ ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf,
+ ha->flt_region_fcp_prio);
}
static void
@@ -948,7 +986,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
uint32_t flt_addr;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -974,7 +1013,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
struct qla_npiv_entry *entry;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
return;
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1144,8 +1184,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_hw_data *ha = vha->hw;
/* Prepare burst-capable write on supported ISPs. */
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
- dwords > OPTROM_BURST_DWORDS) {
+ if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+ !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
@@ -1619,6 +1659,71 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+void
+qla83xx_beacon_blink(struct scsi_qla_host *vha)
+{
+ uint32_t led_select_value;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t led_cfg[6];
+ uint16_t orig_led_cfg[6];
+
+ if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
+ return;
+
+ if (IS_QLA2031(ha) && ha->beacon_blink_led) {
+ if (ha->flags.port0)
+ led_select_value = 0x00201320;
+ else
+ led_select_value = 0x00201328;
+
+ qla83xx_write_remote_reg(vha, led_select_value, 0x40002000);
+ qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000);
+ msleep(1000);
+ qla83xx_write_remote_reg(vha, led_select_value, 0x40004000);
+ qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000);
+ } else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) {
+ int rval;
+
+ /* Save Current */
+ rval = qla81xx_get_led_config(vha, orig_led_cfg);
+ /* Do the blink */
+ if (rval == QLA_SUCCESS) {
+ if (IS_QLA81XX(ha)) {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0;
+ led_cfg[3] = 0;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0;
+ } else {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x4000;
+ led_cfg[2] = 0x4000;
+ led_cfg[3] = 0x2000;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0x2000;
+ }
+ rval = qla81xx_set_led_config(vha, led_cfg);
+ msleep(1000);
+ if (IS_QLA81XX(ha)) {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0;
+ } else {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0x4000;
+ led_cfg[3] = 0x4000;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0x2000;
+ }
+ rval = qla81xx_set_led_config(vha, led_cfg);
+ }
+ /* On exit, restore original (presumes no status change) */
+ qla81xx_set_led_config(vha, orig_led_cfg);
+ }
+}
+
int
qla24xx_beacon_on(struct scsi_qla_host *vha)
{
@@ -1630,6 +1735,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
if (IS_QLA82XX(ha))
return QLA_SUCCESS;
+ if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+ goto skip_gpio; /* let blink handle it */
+
if (ha->beacon_blink_led == 0) {
/* Enable firmware for update */
ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1644,6 +1752,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
return QLA_FUNCTION_FAILED;
}
+ if (IS_QLA2031(ha))
+ goto skip_gpio;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
gpio_data = RD_REG_DWORD(&reg->gpiod);
@@ -1658,6 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
/* So all colors blink together. */
ha->beacon_color_state = 0;
+skip_gpio:
/* Let the per HBA timer kick off the blinking process. */
ha->beacon_blink_led = 1;
@@ -1676,6 +1788,13 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
return QLA_SUCCESS;
ha->beacon_blink_led = 0;
+
+ if (IS_QLA2031(ha))
+ goto set_fw_options;
+
+ if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+ return QLA_SUCCESS;
+
ha->beacon_color_state = QLA_LED_ALL_ON;
ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
@@ -1690,6 +1809,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
RD_REG_DWORD(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+set_fw_options:
ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {