Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/NCR5380.c | 133
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 27
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 14
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 13
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 6
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 69
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 199
-rw-r--r--  drivers/scsi/aacraid/src.c | 30
-rw-r--r--  drivers/scsi/aha1542.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 3
-rw-r--r--  drivers/scsi/arm/fas216.c | 2
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 133
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 20
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 867
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 148
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 62
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 313
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 25
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 573
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_core.c | 19
-rw-r--r--  drivers/scsi/bfa/bfa_cs.h | 41
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 9
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 5
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r--  drivers/scsi/cxlflash/common.h | 9
-rw-r--r--  drivers/scsi/cxlflash/main.c | 90
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 195
-rw-r--r--  drivers/scsi/cxlflash/superpipe.h | 1
-rw-r--r--  drivers/scsi/device_handler/Kconfig | 8
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 979
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 7
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 38
-rw-r--r--  drivers/scsi/dpt_i2o.c | 3
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 5
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 3
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 36
-rw-r--r--  drivers/scsi/fdomain.c | 2
-rw-r--r--  drivers/scsi/gdth.c | 7
-rw-r--r--  drivers/scsi/gdth_proc.c | 11
-rw-r--r--  drivers/scsi/hisi_sas/Makefile | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 43
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 136
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 100
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 2214
-rw-r--r--  drivers/scsi/hosts.c | 29
-rw-r--r--  drivers/scsi/hpsa.c | 52
-rw-r--r--  drivers/scsi/hpsa.h | 3
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 5
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 3
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 122
-rw-r--r--  drivers/scsi/ibmvscsi/viosrp.h | 26
-rw-r--r--  drivers/scsi/imm.c | 7
-rw-r--r--  drivers/scsi/iscsi_boot_sysfs.c | 5
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 54
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 4
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 29
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 17
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 43
-rw-r--r--  drivers/scsi/mac53c94.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 351
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 1064
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 1287
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 136
-rw-r--r--  drivers/scsi/mesh.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 82
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 127
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_init.h | 22
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 117
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_type.h | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 153
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 47
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 37
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 272
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 25
-rw-r--r--  drivers/scsi/mvumi.c | 4
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 3
-rw-r--r--  drivers/scsi/ppa.c | 46
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 201
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 34
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 146
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 56
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 119
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 27
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 70
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c | 6
-rw-r--r--  drivers/scsi/qlogicpti.c | 4
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 1
-rw-r--r--  drivers/scsi/scsi_pm.c | 10
-rw-r--r--  drivers/scsi/scsi_scan.c | 23
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 106
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sg.c | 3
-rw-r--r--  drivers/scsi/sim710.c | 3
-rw-r--r--  drivers/scsi/snic/snic_ctl.c | 2
-rw-r--r--  drivers/scsi/st.c | 122
-rw-r--r--  drivers/scsi/stex.c | 153
-rw-r--r--  drivers/scsi/storvsc_drv.c | 5
-rw-r--r--  drivers/scsi/ufs/ufs.h | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 397
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 6
120 files changed, 9176 insertions, 3209 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e2f31c93717d..e80768f8e579 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -596,6 +596,7 @@ config XEN_SCSI_FRONTEND
config HYPERV_STORAGE
tristate "Microsoft Hyper-V virtual storage driver"
depends on SCSI && HYPERV
+ depends on m || SCSI_FC_ATTRS != m
default HYPERV
help
Select this option to enable the Hyper-V virtual storage driver.
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d72867257346..3eff2a69fe08 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
struct NCR5380_cmd *ncmd;
struct scsi_cmnd *cmd;
- if (list_empty(&hostdata->autosense)) {
+ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
list_for_each_entry(ncmd, &hostdata->unissued, list) {
cmd = NCR5380_to_scmd(ncmd);
dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
@@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- if (hostdata->sensing) {
+ if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
list_add(&ncmd->list, &hostdata->autosense);
hostdata->sensing = NULL;
@@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
struct NCR5380_hostdata *hostdata =
container_of(work, struct NCR5380_hostdata, main_task);
struct Scsi_Host *instance = hostdata->host;
- struct scsi_cmnd *cmd;
int done;
do {
done = 1;
spin_lock_irq(&hostdata->lock);
- while (!hostdata->connected &&
- (cmd = dequeue_next_cmd(instance))) {
+ while (!hostdata->connected && !hostdata->selecting) {
+ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
+
+ if (!cmd)
+ break;
dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
@@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
* entire unit.
*/
- cmd = NCR5380_select(instance, cmd);
- if (!cmd) {
+ if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
} else {
dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
@@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Reselection interrupt */
goto out;
}
+ if (!hostdata->selecting) {
+ /* Command was aborted */
+ NCR5380_write(MODE_REG, MR_BASE);
+ goto out;
+ }
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
shost_printk(KERN_ERR, instance,
@@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
unsigned char msgout = NOP;
int sink = 0;
int len;
-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
int transfersize;
-#endif
unsigned char *data;
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
struct scsi_cmnd *cmd;
@@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
do_abort(instance);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
+ hostdata->connected = NULL;
return;
#endif
case PHASE_DATAIN:
@@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
/* XXX - need to source or sink data here, as appropriate */
} else
cmd->SCp.this_residual -= transfersize - len;
} else
#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
{
- spin_unlock_irq(&hostdata->lock);
- NCR5380_transfer_pio(instance, &phase,
- (int *)&cmd->SCp.this_residual,
+ /* Break up transfer into 3 ms chunks,
+ * presuming 6 accesses per handshake.
+ */
+ transfersize = min((unsigned long)cmd->SCp.this_residual,
+ hostdata->accesses_per_ms / 2);
+ len = transfersize;
+ NCR5380_transfer_pio(instance, &phase, &len,
(unsigned char **)&cmd->SCp.ptr);
- spin_lock_irq(&hostdata->lock);
+ cmd->SCp.this_residual -= transfersize - len;
}
- break;
+ return;
case PHASE_MSGIN:
len = 1;
data = &tmp;
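
Annotation: the hunk above trades the old unlock-and-transfer-everything PIO path for bounded polling. Each pass moves at most accesses_per_ms / 2 bytes and then returns to the main loop, so the host lock is never held for more than about 3 ms of handshaking (assuming ~6 register accesses per handshake, as the comment says). A standalone illustration of that arithmetic; the calibration value of 10000 accesses per ms is made up for the example:

    #include <stdio.h>

    /* Illustration only (not driver code) of the 3 ms chunking rule.
     * With ~6 register accesses per PIO handshake, a chunk of
     * accesses_per_ms / 2 bytes costs about 3 ms of polling:
     * 5000 bytes * 6 accesses / 10000 accesses-per-ms = 3 ms.
     */
    int main(void)
    {
        unsigned long accesses_per_ms = 10000;  /* assumed calibration */
        unsigned long residual = 65536;         /* bytes left in transfer */

        while (residual) {
            unsigned long chunk = residual < accesses_per_ms / 2 ?
                                  residual : accesses_per_ms / 2;
            printf("transfer %lu bytes (~%lu ms), %lu left\n",
                   chunk, 6 * chunk / accesses_per_ms, residual - chunk);
            residual -= chunk;
        }
        return 0;
    }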
@@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
* [disconnected -> connected ->]...
* [autosense -> connected ->] done
*
- * If cmd is unissued then just remove it.
- * If cmd is disconnected, try to select the target.
- * If cmd is connected, try to send an abort message.
- * If cmd is waiting for autosense, give it a chance to complete but check
- * that it isn't left connected.
* If cmd was not found at all then presumably it has already been completed,
* in which case return SUCCESS to try to avoid further EH measures.
+ *
* If the command has not completed yet, we must not fail to find it.
+ * We have no option but to forget the aborted command (even if it still
+ * lacks sense data). The mid-layer may re-issue a command that is in error
+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
+ * this driver are such that a command can appear on one queue only.
+ *
+ * The lock protects driver data structures, but EH handlers also use it
+ * to serialize their own execution and prevent their own re-entry.
*/
static int NCR5380_abort(struct scsi_cmnd *cmd)
@@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ goto out;
}
if (hostdata->selecting == cmd) {
@@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->disconnected, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from disconnected list\n", cmd);
- cmd->result = DID_ERROR << 16;
- if (!hostdata->connected)
- NCR5380_select(instance, cmd);
- if (hostdata->connected != cmd) {
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
+ /* Can't call NCR5380_select() and send ABORT because that
+ * means releasing the lock. Need a bus reset.
+ */
+ set_host_byte(cmd, DID_ERROR);
+ complete_cmd(instance, cmd);
+ result = FAILED;
+ goto out;
}
if (hostdata->connected == cmd) {
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
- if (do_abort(instance)) {
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
- set_host_byte(cmd, DID_ABORT);
#ifdef REAL_DMA
hostdata->dma_len = 0;
#endif
- if (cmd->cmnd[0] == REQUEST_SENSE)
- complete_cmd(instance, cmd);
- else {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- /* Perform autosense for this command */
- list_add(&ncmd->list, &hostdata->autosense);
- }
- }
-
- if (list_find_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: found %p on sense queue\n", cmd);
- spin_unlock_irqrestore(&hostdata->lock, flags);
- queue_work(hostdata->work_q, &hostdata->main_task);
- msleep(1000);
- spin_lock_irqsave(&hostdata->lock, flags);
- if (list_del_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ABORT);
- complete_cmd(instance, cmd);
- goto out;
- }
- }
-
- if (hostdata->connected == cmd) {
- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- hostdata->connected = NULL;
if (do_abort(instance)) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
@@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
goto out;
}
set_host_byte(cmd, DID_ABORT);
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
+ complete_cmd(instance, cmd);
+ goto out;
+ }
+
+ if (list_del_cmd(&hostdata->autosense, cmd)) {
+ dsprintk(NDEBUG_ABORT, instance,
+ "abort: removed %p from sense queue\n", cmd);
+ set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
@@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* commands!
*/
- hostdata->selecting = NULL;
+ if (list_del_cmd(&hostdata->unissued, cmd)) {
+ cmd->result = DID_RESET << 16;
+ cmd->scsi_done(cmd);
+ }
+
+ if (hostdata->selecting) {
+ hostdata->selecting->result = DID_RESET << 16;
+ complete_cmd(instance, hostdata->selecting);
+ hostdata->selecting = NULL;
+ }
list_for_each_entry(ncmd, &hostdata->disconnected, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->disconnected);
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->autosense);
if (hostdata->connected) {
set_host_byte(hostdata->connected, DID_RESET);
@@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
hostdata->connected = NULL;
}
- if (hostdata->sensing) {
- set_host_byte(hostdata->connected, DID_RESET);
- complete_cmd(instance, hostdata->sensing);
- hostdata->sensing = NULL;
- }
-
for (i = 0; i < 8; ++i)
hostdata->busy[i] = 0;
#ifdef REAL_DMA
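
Annotation: taken together, the NCR5380.c hunks above enforce the invariant that a command sits on exactly one queue (unissued, autosense, disconnected) or in one single-command slot (selecting, connected, sensing) at any time, which is what lets NCR5380_abort() handle each location once and stop at the first match. The reset path also drains whole queues with DID_RESET; a hedged sketch of that pattern, reusing the driver's own helpers (not standalone code):

    /* Sketch of the queue-drain pattern added to NCR5380_bus_reset():
     * finish every parked command with DID_RESET, then reinitialize the
     * list head in one step instead of unlinking entries one at a time.
     * Safe only because the host lock is held and the bus was reset, so
     * nothing else can touch the queue concurrently.
     */
    static void flush_cmd_list(struct list_head *queue)
    {
        struct NCR5380_cmd *ncmd;

        list_for_each_entry(ncmd, queue, list) {
            struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);

            set_host_byte(cmd, DID_RESET);
            cmd->scsi_done(cmd);
        }
        INIT_LIST_HEAD(queue);
    }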
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index e4c243748a97..7dfd0fa27255 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -323,7 +323,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
return 0;
}
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
@@ -331,7 +330,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
if (unlikely(!device || !scsi_device_online(device))) {
dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
return 0;
}
return 1;
@@ -541,7 +539,6 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -557,7 +554,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext)
return -ENOMEM;
aac_fib_init(cmd_fibcontext);
@@ -586,7 +584,6 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
aac_fib_complete(cmd_fibcontext);
- aac_fib_free(cmd_fibcontext);
return -1;
}
@@ -1024,7 +1021,6 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -1040,7 +1036,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext)
return -ENOMEM;
aac_fib_init(cmd_fibcontext);
@@ -1068,7 +1065,6 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
aac_fib_complete(cmd_fibcontext);
- aac_fib_free(cmd_fibcontext);
return -1;
}
@@ -1869,7 +1865,6 @@ static void io_callback(void *context, struct fib * fibptr)
break;
}
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -1954,7 +1949,8 @@ static int aac_read(struct scsi_cmnd * scsicmd)
/*
* Allocate and initialize a Fib
*/
- if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext) {
printk(KERN_WARNING "aac_read: fib allocation failed\n");
return -1;
}
@@ -2051,7 +2047,8 @@ static int aac_write(struct scsi_cmnd * scsicmd)
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
- if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext) {
/* FIB temporarily unavailable,not catastrophic failure */
/* scsicmd->result = DID_ERROR << 16;
@@ -2285,7 +2282,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
/*
* Allocate and initialize a Fib
*/
- cmd_fibcontext = aac_fib_alloc(aac);
+ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
if (!cmd_fibcontext)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -3157,7 +3154,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -3187,9 +3183,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
- if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext)
return -1;
- }
+
status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
/*
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 074878b55a0b..efa493cf1bc6 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -62,7 +62,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 41010
+# define AAC_DRIVER_BUILD 41052
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -94,6 +94,13 @@ enum {
#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
+/*
+ * These macros are for keeping track of
+ * character device state.
+ */
+#define AAC_CHARDEV_UNREGISTERED (-1)
+#define AAC_CHARDEV_NEEDS_REINIT (-2)
+
/* #define AAC_DETAILED_STATUS_INFO */
struct diskparm
@@ -944,6 +951,7 @@ struct fib {
*/
struct list_head fiblink;
void *data;
+ u32 vector_no;
struct hw_fib *hw_fib_va; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
@@ -1123,6 +1131,7 @@ struct aac_dev
struct fib *free_fib;
spinlock_t fib_lock;
+ struct mutex ioctl_mutex;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
@@ -1234,6 +1243,7 @@ struct aac_dev
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
u8 adapter_shutdown;
+ u32 handle_pci_error;
};
#define aac_adapter_interrupt(dev) \
@@ -2113,7 +2123,9 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
+void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd);
int aac_fib_setup(struct aac_dev *dev);
void aac_fib_map_free(struct aac_dev *dev);
void aac_fib_free(struct fib * context);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 54195a117f72..4b3bb52b5108 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -855,13 +855,20 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
int status;
+ mutex_lock(&dev->ioctl_mutex);
+
+ if (dev->adapter_shutdown) {
+ status = -EACCES;
+ goto cleanup;
+ }
+
/*
* HBA gets first crack
*/
status = aac_dev_ioctl(dev, cmd, arg);
if (status != -ENOTTY)
- return status;
+ goto cleanup;
switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
@@ -890,6 +897,10 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
status = -ENOTTY;
break;
}
+
+cleanup:
+ mutex_unlock(&dev->ioctl_mutex);
+
return status;
}
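
Annotation: the new ioctl_mutex serves two purposes here. It serializes user ioctls per adapter (the per-device replacement for the global aac_mutex dropped in linit.c below), and it pairs with adapter_shutdown so that once aac_send_shutdown() sets the flag under the mutex, no later ioctl can reach the firmware. A minimal sketch of that gate, with hypothetical struct and function names:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    /* Hedged sketch of the shutdown gate: the writer flips the flag
     * under the mutex; readers check it under the same mutex, so no
     * ioctl can still be mid-flight once shutdown has been observed.
     */
    struct adapter {                    /* hypothetical stand-in */
        struct mutex ioctl_mutex;
        int adapter_shutdown;
    };

    static int do_ioctl_gated(struct adapter *dev)
    {
        int status;

        mutex_lock(&dev->ioctl_mutex);
        if (dev->adapter_shutdown) {
            status = -EACCES;
            goto cleanup;
        }
        status = 0;                     /* ... dispatch the command ... */
    cleanup:
        mutex_unlock(&dev->ioctl_mutex);
        return status;
    }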
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 0e954e37f0b5..2b4e75380ae6 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -212,8 +212,11 @@ int aac_send_shutdown(struct aac_dev * dev)
return -ENOMEM;
aac_fib_init(fibctx);
- cmd = (struct aac_close *) fib_data(fibctx);
+ mutex_lock(&dev->ioctl_mutex);
+ dev->adapter_shutdown = 1;
+ mutex_unlock(&dev->ioctl_mutex);
+ cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xfffffffe);
@@ -229,7 +232,6 @@ int aac_send_shutdown(struct aac_dev * dev)
/* FIB should be freed only after getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
- dev->adapter_shutdown = 1;
if ((dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) &&
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1f90fe849c9..511bbc575062 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- pci_free_consistent(dev->pdev,
- dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
- dev->hw_fib_va, dev->hw_fib_pa);
+ if (dev->hw_fib_va && dev->max_fib_size) {
+ pci_free_consistent(dev->pdev,
+ (dev->max_fib_size *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ }
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
+void aac_fib_vector_assign(struct aac_dev *dev)
+{
+ u32 i = 0;
+ u32 vector = 1;
+ struct fib *fibptr = NULL;
+
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++) {
+ if ((dev->max_msix == 1) ||
+ (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
+ - dev->vector_cap))) {
+ fibptr->vector_no = 0;
+ } else {
+ fibptr->vector_no = vector;
+ vector++;
+ if (vector == dev->max_msix)
+ vector = 1;
+ }
+ }
+}
+
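
Annotation: aac_fib_vector_assign() fixes each fib's MSI-X vector once at setup time. Vector 0 is reserved for the single-vector case and for the tail of the array past the per-vector capacity (the management fibs); the remaining fibs round-robin over vectors 1..max_msix-1. A standalone illustration with made-up sizes (16 I/O fibs, 8 management fibs, 4 vectors, vector_cap of 8):

    #include <stdio.h>

    /* Illustration only: the last vector_cap fibs pin to vector 0,
     * everything before them cycles through vectors 1..max_msix-1.
     */
    int main(void)
    {
        int total = 16 + 8, max_msix = 4, vector_cap = 8;
        int vector = 1;

        for (int i = 0; i < total; i++) {
            int v;

            if (max_msix == 1 || i > (total - 1) - vector_cap) {
                v = 0;
            } else {
                v = vector++;
                if (vector == max_msix)
                    vector = 1;
            }
            printf("fib %2d -> vector %d\n", i, v);
        }
        return 0;
    }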
/**
* aac_fib_setup - setup the fibs
* @dev: Adapter to set up
@@ -137,6 +162,7 @@ int aac_fib_setup(struct aac_dev * dev)
i++, fibptr++)
{
fibptr->flags = 0;
+ fibptr->size = sizeof(struct fib);
fibptr->dev = dev;
fibptr->hw_fib_va = hw_fib;
fibptr->data = (void *) fibptr->hw_fib_va->data;
@@ -151,18 +177,49 @@ int aac_fib_setup(struct aac_dev * dev)
hw_fib_pa = hw_fib_pa +
dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
+
+ /*
+ * Assign vector numbers to fibs
+ */
+ aac_fib_vector_assign(dev);
+
/*
* Add the fib chain to the free list
*/
dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
/*
- * Enable this to debug out of queue space
- */
- dev->free_fib = &dev->fibs[0];
+ * Set 8 fibs aside for management tools
+ */
+ dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
return 0;
}
/**
+ * aac_fib_alloc_tag-allocate a fib using tags
+ * @dev: Adapter to allocate the fib for
+ *
+ * Allocate a fib from the adapter fib pool using tags
+ * from the blk layer.
+ */
+
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+{
+ struct fib *fibptr;
+
+ fibptr = &dev->fibs[scmd->request->tag];
+ /*
+ * Null out fields that depend on being zero at the start of
+ * each I/O
+ */
+ fibptr->hw_fib_va->header.XferState = 0;
+ fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+ fibptr->callback_data = NULL;
+ fibptr->callback = NULL;
+
+ return fibptr;
+}
+
+/**
* aac_fib_alloc - allocate a fib
* @dev: Adapter to allocate the fib for
*
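
Annotation: aac_fib_alloc_tag() makes per-I/O fib allocation O(1) and lock-free by indexing the preallocated dev->fibs[] array with the block layer's request tag; a fib now lives exactly as long as its request, which is why the completion paths in aachba.c and dpcsup.c above no longer call aac_fib_free(). The free-list allocator below it remains only for the fibs set aside for management commands. A hedged restatement of the indexing idea (driver context assumed):

    /* Sketch: the blk-layer tag picks a dedicated, preallocated fib, so
     * no lock or free list is needed and "freeing" is implicit: the slot
     * is simply reused when the tag is reissued. Only the fields that
     * must start at zero for each I/O are reset.
     */
    static struct fib *fib_for_cmd(struct aac_dev *dev, struct scsi_cmnd *scmd)
    {
        struct fib *fibptr = &dev->fibs[scmd->request->tag];

        fibptr->hw_fib_va->header.XferState = 0;
        fibptr->callback = NULL;
        fibptr->callback_data = NULL;
        return fibptr;
    }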
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index da9d9936e995..d677b52860ae 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -394,7 +394,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
fib->callback(fib->callback_data, fib);
} else {
aac_fib_complete(fib);
- aac_fib_free(fib);
}
} else {
unsigned long flagv;
@@ -416,7 +415,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
fib->done = 0;
spin_unlock_irqrestore(&fib->event_lock, flagv);
aac_fib_complete(fib);
- aac_fib_free(fib);
}
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 76eaa38ffd6e..21a67ed047e8 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
@@ -79,7 +80,7 @@ MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
static DEFINE_MUTEX(aac_mutex);
static LIST_HEAD(aac_devices);
-static int aac_cfg_major = -1;
+static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
/*
@@ -454,6 +455,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
} else
scsi_change_queue_depth(sdev, 1);
+ sdev->tagged_supported = 1;
+
return 0;
}
@@ -700,23 +703,18 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
static long aac_cfg_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- int ret;
- struct aac_dev *aac;
- aac = (struct aac_dev *)file->private_data;
- if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown)
+ struct aac_dev *aac = (struct aac_dev *)file->private_data;
+
+ if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- mutex_lock(&aac_mutex);
- ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
- mutex_unlock(&aac_mutex);
- return ret;
+ return aac_do_ioctl(aac, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
long ret;
- mutex_lock(&aac_mutex);
switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
case FSACTL_SENDFIB:
@@ -750,7 +748,6 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
ret = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&aac_mutex);
return ret;
}
@@ -1075,6 +1072,8 @@ static void __aac_shutdown(struct aac_dev * aac)
int i;
int cpu;
+ aac_send_shutdown(aac);
+
if (aac->aif_thread) {
int i;
/* Clear out events first */
@@ -1086,7 +1085,6 @@ static void __aac_shutdown(struct aac_dev * aac)
}
kthread_stop(aac->thread);
}
- aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
@@ -1120,6 +1118,13 @@ static void __aac_shutdown(struct aac_dev * aac)
else if (aac->max_msix > 1)
pci_disable_msix(aac->pdev);
}
+static void aac_init_char(void)
+{
+ aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
+ if (aac_cfg_major < 0) {
+ pr_err("aacraid: unable to register \"aac\" device.\n");
+ }
+}
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -1132,6 +1137,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
u64 dmamask;
extern int aac_sync_mode;
+ /*
+ * Only series 7 needs freset.
+ */
+ if (pdev->device == PMC_DEVICE_S7)
+ pdev->needs_freset = 1;
+
list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id > unique_id)
break;
@@ -1171,6 +1182,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_cmd_len = 16;
shost->use_cmd_list = 1;
+ if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
+ aac_init_char();
+
aac = (struct aac_dev *)shost->hostdata;
aac->base_start = pci_resource_start(pdev, 0);
aac->scsi_host_ptr = shost;
@@ -1185,6 +1199,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_host;
spin_lock_init(&aac->fib_lock);
+ mutex_init(&aac->ioctl_mutex);
/*
* Map in the registers from the adapter.
*/
@@ -1296,6 +1311,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_deinit;
scsi_scan_host(shost);
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
return 0;
out_deinit:
@@ -1317,7 +1335,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return error;
}
-#if (defined(CONFIG_PM))
static void aac_release_resources(struct aac_dev *aac)
{
int i;
@@ -1404,14 +1421,26 @@ static int aac_acquire_resources(struct aac_dev *dev)
aac_adapter_enable_int(dev);
- if (!dev->sync_mode)
+ /* max msix may change after EEH
+ * Re-assign vectors to fibs
+ */
+ aac_fib_vector_assign(dev);
+
+ if (!dev->sync_mode) {
+ /* After EEH recovery or suspend resume, max_msix count
+ * may change, therfore updating in init as well.
+ */
aac_adapter_start(dev);
+ dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ }
return 0;
error_iounmap:
return -1;
}
+
+#if (defined(CONFIG_PM))
static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1495,10 +1524,142 @@ static void aac_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
if (list_empty(&aac_devices)) {
unregister_chrdev(aac_cfg_major, "aac");
- aac_cfg_major = -1;
+ aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
+ }
+}
+
+static void aac_flush_ios(struct aac_dev *aac)
+{
+ int i;
+ struct scsi_cmnd *cmd;
+
+ for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
+ cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
+ if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
+ scsi_dma_unmap(cmd);
+
+ if (aac->handle_pci_error)
+ cmd->result = DID_NO_CONNECT << 16;
+ else
+ cmd->result = DID_RESET << 16;
+
+ cmd->scsi_done(cmd);
+ }
+ }
+}
+
+static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state error)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct aac_dev *aac = shost_priv(shost);
+
+ dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);
+
+ switch (error) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ aac->handle_pci_error = 1;
+
+ scsi_block_requests(aac->scsi_host_ptr);
+ aac_flush_ios(aac);
+ aac_release_resources(aac);
+
+ pci_disable_pcie_error_reporting(pdev);
+ aac_adapter_ioremap(aac, 0);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ aac->handle_pci_error = 1;
+
+ aac_flush_ios(aac);
+ return PCI_ERS_RESULT_DISCONNECT;
}
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
+ return PCI_ERS_RESULT_NEED_RESET;
}
+static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
+{
+ dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
+ dev_warn(&pdev->dev,
+ "aacraid: failed to enable slave\n");
+ goto fail_device;
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
+ goto fail_device;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+
+fail_device:
+ dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+
+static void aac_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct scsi_device *sdev = NULL;
+ struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (aac_adapter_ioremap(aac, aac->base_size)) {
+
+ dev_err(&pdev->dev, "aacraid: ioremap failed\n");
+ /* remap failed, go back ... */
+ aac->comm_interface = AAC_COMM_PRODUCER;
+ if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
+ dev_warn(&pdev->dev,
+ "aacraid: unable to map adapter.\n");
+
+ return;
+ }
+ }
+
+ msleep(10000);
+
+ aac_acquire_resources(aac);
+
+ /*
+ * reset this flag to unblock ioctl() as it was set
+ * at aac_send_shutdown() to block ioctls from the upper layer
+ */
+ aac->adapter_shutdown = 0;
+ aac->handle_pci_error = 0;
+
+ shost_for_each_device(sdev, shost)
+ if (sdev->sdev_state == SDEV_OFFLINE)
+ sdev->sdev_state = SDEV_RUNNING;
+ scsi_unblock_requests(aac->scsi_host_ptr);
+ scsi_scan_host(aac->scsi_host_ptr);
+ pci_save_state(pdev);
+
+ dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
+}
+
+static struct pci_error_handlers aac_pci_err_handler = {
+ .error_detected = aac_pci_error_detected,
+ .mmio_enabled = aac_pci_mmio_enabled,
+ .slot_reset = aac_pci_slot_reset,
+ .resume = aac_pci_resume,
+};
+
static struct pci_driver aac_pci_driver = {
.name = AAC_DRIVERNAME,
.id_table = aac_pci_tbl,
@@ -1509,6 +1670,7 @@ static struct pci_driver aac_pci_driver = {
.resume = aac_resume,
#endif
.shutdown = aac_shutdown,
+ .err_handler = &aac_pci_err_handler,
};
static int __init aac_init(void)
@@ -1522,11 +1684,8 @@ static int __init aac_init(void)
if (error < 0)
return error;
- aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
- if (aac_cfg_major < 0) {
- printk(KERN_WARNING
- "aacraid: unable to register \"aac\" device.\n");
- }
+ aac_init_char();
+
return 0;
}
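
Annotation: the handlers registered above hook aacraid into PCI AER/EEH recovery. The PCI core drives them in a fixed order: error_detected() first (here: mark handle_pci_error, fail in-flight commands via aac_flush_ios(), release IRQs and mappings), then slot_reset() after the link reset (restore config space, re-enable the device), then resume() (remap the BARs, reacquire resources, clear the shutdown flags, unblock and rescan). A minimal, hypothetical driver skeleton showing how the pieces plug together:

    #include <linux/pci.h>

    /* Illustrative skeleton, not aacraid code: the AER core calls
     * error_detected first, slot_reset if any handler asked for a
     * reset, and resume once recovery succeeds.
     */
    static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                                enum pci_channel_state state)
    {
        return state == pci_channel_io_perm_failure ?
               PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
    {
        pci_restore_state(pdev);
        return pci_enable_device(pdev) ? PCI_ERS_RESULT_DISCONNECT :
                                         PCI_ERS_RESULT_RECOVERED;
    }

    static void demo_resume(struct pci_dev *pdev)
    {
        pci_set_master(pdev);           /* then remap, rescan, unblock */
    }

    static const struct pci_error_handlers demo_err_handler = {
        .error_detected = demo_error_detected,
        .slot_reset     = demo_slot_reset,
        .resume         = demo_resume,
    };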
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2aa34ea8ceb1..bc0203f3d243 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
break;
if (dev->msi_enabled && dev->max_msix > 1)
atomic_dec(&dev->rrq_outstanding[vector_no]);
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
dev->host_rrq[index++] = 0;
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
if (index == (vector_no + 1) * dev->vector_cap)
index = vector_no * dev->vector_cap;
dev->host_rrq_idx[vector_no] = index;
@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
#endif
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+ u16 vector_no;
atomic_inc(&q->numpending);
if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
dev->max_msix > 1) {
- u_int16_t vector_no, first_choice = 0xffff;
-
- vector_no = dev->fibs_pushed_no % dev->max_msix;
- do {
- vector_no += 1;
- if (vector_no == dev->max_msix)
- vector_no = 1;
- if (atomic_read(&dev->rrq_outstanding[vector_no]) <
- dev->vector_cap)
- break;
- if (0xffff == first_choice)
- first_choice = vector_no;
- else if (vector_no == first_choice)
- break;
- } while (1);
- if (vector_no == first_choice)
- vector_no = 0;
- atomic_inc(&dev->rrq_outstanding[vector_no]);
- if (dev->fibs_pushed_no == 0xffffffff)
- dev->fibs_pushed_no = 0;
- else
- dev->fibs_pushed_no++;
+ vector_no = fib->vector_no;
fib->hw_fib_va->header.Handle += (vector_no << 16);
+ } else {
+ vector_no = 0;
}
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Calculate the amount to the fibsize bits */
fibsize = (hdr_size + 127) / 128 - 1;
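
Annotation: this is the consumer side of aac_fib_vector_assign(). The per-fib vector chosen at init replaces the old per-I/O round-robin search, and the chosen vector is encoded into the upper 16 bits of header.Handle so the completion comes back on the matching reply ring; the hunk at the top of this file also reorders the ISR to clear the host_rrq slot before handing the fib to aac_intr_normal(). A hedged sketch of the simplified submission path (driver context assumed):

    /* Sketch: the vector was chosen once at init, so submission just
     * reads it, tags the handle with it, and bumps the per-vector
     * outstanding count that the ISR decrements on completion.
     */
    static void deliver_tag_vector(struct aac_dev *dev, struct fib *fib)
    {
        u16 vector_no = 0;

        if (dev->msi_enabled && dev->max_msix > 1 &&
            fib->hw_fib_va->header.Command != AifRequest) {
            vector_no = fib->vector_no;
            /* upper 16 bits of Handle route the completion */
            fib->hw_fib_va->header.Handle += (vector_no << 16);
        }
        atomic_inc(&dev->rrq_outstanding[vector_no]);
    }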
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 5b8b2937a3fe..7db448ec8beb 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -403,6 +403,9 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
if (!cptr)
return SCSI_MLQUEUE_HOST_BUSY;
+ } else {
+ sg_count = 0;
+ cptr = NULL;
}
/* Use the outgoing mailboxes in a round-robin fashion, because this
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b846a4683562..fc6a83188c1e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
case AHC_DEV_Q_TAGGED:
scsi_change_queue_depth(sdev,
dev->openings + dev->active);
+ break;
default:
/*
* We allow the OS to queue 2 untagged transactions to
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index deaaf84989cd..12b88294d667 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -677,7 +677,8 @@ int round_period(unsigned int period)
* Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting
*/
static
-unsigned char calc_sync_xfer(unsigned int period, unsigned int offset)
+unsigned char __maybe_unused calc_sync_xfer(unsigned int period,
+ unsigned int offset)
{
return sync_xfer_table[round_period(period)].reg_value |
((offset < SDTR_SIZE) ? offset : SDTR_SIZE);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index decdc71b6b86..24388795ee9a 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -98,6 +98,7 @@ static int level_mask = LOG_ERROR;
module_param(level_mask, int, 0644);
+#ifndef MODULE
static int __init fas216_log_setup(char *str)
{
char *s;
@@ -138,6 +139,7 @@ static int __init fas216_log_setup(char *str)
}
__setup("fas216_logging=", fas216_log_setup);
+#endif
static inline unsigned char fas216_readb(FAS216_Info *info, unsigned int reg)
{
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index e65478651ca9..389825ba5d96 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
struct NCR5380_cmd *ncmd;
struct scsi_cmnd *cmd;
- if (list_empty(&hostdata->autosense)) {
+ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
list_for_each_entry(ncmd, &hostdata->unissued, list) {
cmd = NCR5380_to_scmd(ncmd);
dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
@@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- if (hostdata->sensing) {
+ if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
list_add(&ncmd->list, &hostdata->autosense);
hostdata->sensing = NULL;
@@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
struct NCR5380_hostdata *hostdata =
container_of(work, struct NCR5380_hostdata, main_task);
struct Scsi_Host *instance = hostdata->host;
- struct scsi_cmnd *cmd;
int done;
/*
@@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
done = 1;
spin_lock_irq(&hostdata->lock);
- while (!hostdata->connected &&
- (cmd = dequeue_next_cmd(instance))) {
+ while (!hostdata->connected && !hostdata->selecting) {
+ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
+
+ if (!cmd)
+ break;
dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
@@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
#ifdef SUPPORT_TAGS
cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
#endif
- cmd = NCR5380_select(instance, cmd);
- if (!cmd) {
+ if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
maybe_release_dma_irq(instance);
} else {
@@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Reselection interrupt */
goto out;
}
+ if (!hostdata->selecting) {
+ /* Command was aborted */
+ NCR5380_write(MODE_REG, MR_BASE);
+ goto out;
+ }
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
shost_printk(KERN_ERR, instance,
@@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
unsigned char msgout = NOP;
int sink = 0;
int len;
-#if defined(REAL_DMA)
int transfersize;
-#endif
unsigned char *data;
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
struct scsi_cmnd *cmd;
@@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
do_abort(instance);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
+ hostdata->connected = NULL;
return;
#endif
case PHASE_DATAIN:
@@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
/* XXX - need to source or sink data here, as appropriate */
} else {
#ifdef REAL_DMA
@@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
} else
#endif /* defined(REAL_DMA) */
{
- spin_unlock_irq(&hostdata->lock);
- NCR5380_transfer_pio(instance, &phase,
- (int *)&cmd->SCp.this_residual,
+ /* Break up transfer into 3 ms chunks,
+ * presuming 6 accesses per handshake.
+ */
+ transfersize = min((unsigned long)cmd->SCp.this_residual,
+ hostdata->accesses_per_ms / 2);
+ len = transfersize;
+ NCR5380_transfer_pio(instance, &phase, &len,
(unsigned char **)&cmd->SCp.ptr);
- spin_lock_irq(&hostdata->lock);
+ cmd->SCp.this_residual -= transfersize - len;
}
#if defined(CONFIG_SUN3) && defined(REAL_DMA)
/* if we had intended to dma that command clear it */
if (sun3_dma_setup_done == cmd)
sun3_dma_setup_done = NULL;
#endif
- break;
+ return;
case PHASE_MSGIN:
len = 1;
data = &tmp;
@@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
* [disconnected -> connected ->]...
* [autosense -> connected ->] done
*
- * If cmd is unissued then just remove it.
- * If cmd is disconnected, try to select the target.
- * If cmd is connected, try to send an abort message.
- * If cmd is waiting for autosense, give it a chance to complete but check
- * that it isn't left connected.
* If cmd was not found at all then presumably it has already been completed,
* in which case return SUCCESS to try to avoid further EH measures.
+ *
* If the command has not completed yet, we must not fail to find it.
+ * We have no option but to forget the aborted command (even if it still
+ * lacks sense data). The mid-layer may re-issue a command that is in error
+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
+ * this driver are such that a command can appear on one queue only.
+ *
+ * The lock protects driver data structures, but EH handlers also use it
+ * to serialize their own execution and prevent their own re-entry.
*/
static int NCR5380_abort(struct scsi_cmnd *cmd)
@@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ goto out;
}
if (hostdata->selecting == cmd) {
@@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->disconnected, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from disconnected list\n", cmd);
- cmd->result = DID_ERROR << 16;
- if (!hostdata->connected)
- NCR5380_select(instance, cmd);
- if (hostdata->connected != cmd) {
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
+ /* Can't call NCR5380_select() and send ABORT because that
+ * means releasing the lock. Need a bus reset.
+ */
+ set_host_byte(cmd, DID_ERROR);
+ complete_cmd(instance, cmd);
+ result = FAILED;
+ goto out;
}
if (hostdata->connected == cmd) {
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
- if (do_abort(instance)) {
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
- set_host_byte(cmd, DID_ABORT);
#ifdef REAL_DMA
hostdata->dma_len = 0;
#endif
- if (cmd->cmnd[0] == REQUEST_SENSE)
- complete_cmd(instance, cmd);
- else {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- /* Perform autosense for this command */
- list_add(&ncmd->list, &hostdata->autosense);
- }
- }
-
- if (list_find_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: found %p on sense queue\n", cmd);
- spin_unlock_irqrestore(&hostdata->lock, flags);
- queue_work(hostdata->work_q, &hostdata->main_task);
- msleep(1000);
- spin_lock_irqsave(&hostdata->lock, flags);
- if (list_del_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ABORT);
- complete_cmd(instance, cmd);
- goto out;
- }
- }
-
- if (hostdata->connected == cmd) {
- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- hostdata->connected = NULL;
if (do_abort(instance)) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
@@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
goto out;
}
set_host_byte(cmd, DID_ABORT);
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
+ complete_cmd(instance, cmd);
+ goto out;
+ }
+
+ if (list_del_cmd(&hostdata->autosense, cmd)) {
+ dsprintk(NDEBUG_ABORT, instance,
+ "abort: removed %p from sense queue\n", cmd);
+ set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
@@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* commands!
*/
- hostdata->selecting = NULL;
+ if (list_del_cmd(&hostdata->unissued, cmd)) {
+ cmd->result = DID_RESET << 16;
+ cmd->scsi_done(cmd);
+ }
+
+ if (hostdata->selecting) {
+ hostdata->selecting->result = DID_RESET << 16;
+ complete_cmd(instance, hostdata->selecting);
+ hostdata->selecting = NULL;
+ }
list_for_each_entry(ncmd, &hostdata->disconnected, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->disconnected);
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->autosense);
if (hostdata->connected) {
set_host_byte(hostdata->connected, DID_RESET);
@@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
hostdata->connected = NULL;
}
- if (hostdata->sensing) {
- set_host_byte(hostdata->connected, DID_RESET);
- complete_cmd(instance, hostdata->sensing);
- hostdata->sensing = NULL;
- }
-
#ifdef SUPPORT_TAGS
free_all_tags(hostdata);
#endif
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a41c6432f444..ee5ace873535 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -42,7 +42,7 @@ struct be_queue_info {
u16 id;
u16 tail, head;
bool created;
- atomic_t used; /* Number of valid elements in the queue */
+ u16 used; /* Number of valid elements in the queue */
};
static inline u32 MODULO(u16 val, u16 limit)
@@ -110,10 +110,9 @@ struct be_mcc_obj {
};
struct beiscsi_mcc_tag_state {
-#define MCC_TAG_STATE_COMPLETED 0x00
-#define MCC_TAG_STATE_RUNNING 0x01
-#define MCC_TAG_STATE_TIMEOUT 0x02
- uint8_t tag_state;
+ unsigned long tag_state;
+#define MCC_TAG_STATE_RUNNING 1
+#define MCC_TAG_STATE_TIMEOUT 2
struct be_dma_mem tag_mem_state;
};
@@ -124,7 +123,7 @@ struct be_ctrl_info {
struct pci_dev *pdev;
/* Mbox used for cmd request/response */
- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
@@ -133,11 +132,10 @@ struct be_ctrl_info {
/* MCC Rings */
struct be_mcc_obj mcc_obj;
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
- spinlock_t mcc_cq_lock;
wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
unsigned int mcc_tag[MAX_MCC_CMD];
- unsigned int mcc_numtag[MAX_MCC_CMD + 1];
+ unsigned int mcc_tag_status[MAX_MCC_CMD + 1];
unsigned short mcc_alloc_index;
unsigned short mcc_free_index;
unsigned int mcc_tag_available;
@@ -147,6 +145,12 @@ struct be_ctrl_info {
#include "be_cmds.h"
+/* WRB index mask for MCC_Q_LEN queue entries */
+#define MCC_Q_WRB_IDX_MASK CQE_STATUS_WRB_MASK
+#define MCC_Q_WRB_IDX_SHIFT CQE_STATUS_WRB_SHIFT
+/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */
+#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1)
+
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
#define mcc_timeout 120000 /* 12s timeout */
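
Annotation: changing tag_state from an enum-like byte to an unsigned long of flag bits lets the driver manage RUNNING and TIMEOUT with the atomic set_bit()/test_bit() family instead of taking a lock around every state transition (mbox_lock itself becomes a mutex here because mailbox waiters sleep). A hedged sketch of the timeout handshake this enables in be_cmds.c, where wmb() publishes the saved DMA state before the TIMEOUT bit becomes visible (driver types assumed):

    #include <linux/bitops.h>

    /* Waiter side: record where the command's DMA memory lives, then
     * set TIMEOUT; wmb() keeps that order. Completion side: if TIMEOUT
     * is set, the completion path owns freeing the saved memory.
     */
    static void mark_tag_timed_out(struct beiscsi_mcc_tag_state *ts,
                                   struct be_dma_mem *cmd_mem)
    {
        ts->tag_mem_state = *cmd_mem;   /* save va/dma/size for later free */
        wmb();                          /* memory state before the flag */
        set_bit(MCC_TAG_STATE_TIMEOUT, &ts->tag_state);
    }

    static bool tag_timed_out(struct beiscsi_mcc_tag_state *ts)
    {
        return test_bit(MCC_TAG_STATE_TIMEOUT, &ts->tag_state);
    }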
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 2778089b01a5..a55eaeea37e7 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -104,24 +104,16 @@ int be_chk_reset_complete(struct beiscsi_hba *phba)
return 0;
}
-void be_mcc_notify(struct beiscsi_hba *phba)
-{
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- u32 val = 0;
-
- val |= mccq->id & DB_MCCQ_RING_ID_MASK;
- val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
- iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
-}
-
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
unsigned int tag = 0;
+ spin_lock(&phba->ctrl.mcc_lock);
if (phba->ctrl.mcc_tag_available) {
tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
- phba->ctrl.mcc_numtag[tag] = 0;
+ phba->ctrl.mcc_tag_status[tag] = 0;
+ phba->ctrl.ptag_state[tag].tag_state = 0;
}
if (tag) {
phba->ctrl.mcc_tag_available--;
@@ -130,11 +122,89 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
else
phba->ctrl.mcc_alloc_index++;
}
+ spin_unlock(&phba->ctrl.mcc_lock);
return tag;
}
+struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
+ unsigned int *ref_tag)
+{
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ struct be_mcc_wrb *wrb = NULL;
+ unsigned int tag;
+
+ spin_lock_bh(&phba->ctrl.mcc_lock);
+ if (mccq->used == mccq->len) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
+ mccq->used, phba->ctrl.mcc_tag_available);
+ goto alloc_failed;
+ }
+
+ if (!phba->ctrl.mcc_tag_available)
+ goto alloc_failed;
+
+ tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
+ phba->ctrl.mcc_tag_available,
+ phba->ctrl.mcc_alloc_index);
+ goto alloc_failed;
+ }
+
+ /* return this tag for further reference */
+ *ref_tag = tag;
+ phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
+ phba->ctrl.mcc_tag_status[tag] = 0;
+ phba->ctrl.ptag_state[tag].tag_state = 0;
+ phba->ctrl.mcc_tag_available--;
+ if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
+ phba->ctrl.mcc_alloc_index = 0;
+ else
+ phba->ctrl.mcc_alloc_index++;
+
+ wrb = queue_head_node(mccq);
+ memset(wrb, 0, sizeof(*wrb));
+ wrb->tag0 = tag;
+ wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
+ queue_head_inc(mccq);
+ mccq->used++;
+
+alloc_failed:
+ spin_unlock_bh(&phba->ctrl.mcc_lock);
+ return wrb;
+}
+
+void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
+{
+ struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+
+ spin_lock_bh(&ctrl->mcc_lock);
+ tag = tag & MCC_Q_CMD_TAG_MASK;
+ ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
+ if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
+ ctrl->mcc_free_index = 0;
+ else
+ ctrl->mcc_free_index++;
+ ctrl->mcc_tag_available++;
+ mccq->used--;
+ spin_unlock_bh(&ctrl->mcc_lock);
+}
+
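
Annotation: alloc_mcc_wrb() and free_mcc_wrb() above pair the tag and the WRB slot as a single allocation under mcc_lock, with the queue head index folded into tag0 (MCC_Q_WRB_IDX_SHIFT) so the completion path can locate the WRB again. A hedged sketch of the intended calling convention; be_mcc_notify() here stands in for whatever doorbell/post helper the rest of the series provides and is an assumption, not taken from the code above:

    /* Sketch: allocate tag+WRB together, build the request, ring the
     * doorbell, then let beiscsi_mccq_compl_wait()/free_mcc_wrb()
     * retire both on completion (or on timeout, via the tag state).
     */
    static int issue_mcc_cmd(struct beiscsi_hba *phba)
    {
        struct be_mcc_wrb *wrb;
        unsigned int tag;

        wrb = alloc_mcc_wrb(phba, &tag);
        if (!wrb)
            return -EBUSY;              /* queue full or no tag free */

        /* ... fill the embedded command in wrb here ... */

        be_mcc_notify(phba, tag);       /* assumed post/doorbell helper */
        return beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
    }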
+/**
+ * beiscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ **/
+void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
/*
- * beiscsi_mccq_compl()- Wait for completion of MBX
+ * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
* @phba: Driver private structure
* @tag: Tag for the MBX Command
* @wrb: the WRB used for the MBX Command
@@ -146,43 +216,40 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
* Success: 0
* Failure: Non-Zero
**/
-int beiscsi_mccq_compl(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb,
- struct be_dma_mem *mbx_cmd_mem)
+int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem)
{
int rc = 0;
- uint32_t mcc_tag_response;
+ uint32_t mcc_tag_status;
uint16_t status = 0, addl_status = 0, wrb_num = 0;
struct be_mcc_wrb *temp_wrb;
struct be_cmd_req_hdr *mbx_hdr;
struct be_cmd_resp_hdr *mbx_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- if (beiscsi_error(phba)) {
- free_mcc_tag(&phba->ctrl, tag);
+ if (beiscsi_error(phba))
return -EPERM;
- }
-
- /* Set MBX Tag state to Active */
- spin_lock(&phba->ctrl.mbox_lock);
- phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
- spin_unlock(&phba->ctrl.mbox_lock);
/* wait for the mccq completion */
rc = wait_event_interruptible_timeout(
phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag],
+ phba->ctrl.mcc_tag_status[tag],
msecs_to_jiffies(
BEISCSI_HOST_MBX_TIMEOUT));
-
+ /**
+ * If MBOX cmd timeout expired, tag and resource allocated
+ * for cmd is not freed until FW returns completion.
+ */
if (rc <= 0) {
struct be_dma_mem *tag_mem;
- /* Set MBX Tag state to timeout */
- spin_lock(&phba->ctrl.mbox_lock);
- phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
- spin_unlock(&phba->ctrl.mbox_lock);
- /* Store resource addr to be freed later */
+ /**
+ * PCI/DMA memory allocated and posted in non-embedded mode
+ * will have mbx_cmd_mem != NULL.
+ * Save virtual and bus addresses for the command so that it
+ * can be freed later.
+ **/
tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
if (mbx_cmd_mem) {
tag_mem->size = mbx_cmd_mem->size;
@@ -191,28 +258,28 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
} else
tag_mem->size = 0;
+ /* first make tag_mem_state visible to all */
+ wmb();
+ set_bit(MCC_TAG_STATE_TIMEOUT,
+ &phba->ctrl.ptag_state[tag].tag_state);
+
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Completion timed out\n");
return -EBUSY;
- } else {
- rc = 0;
- /* Set MBX Tag state to completed */
- spin_lock(&phba->ctrl.mbox_lock);
- phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
- spin_unlock(&phba->ctrl.mbox_lock);
}
- mcc_tag_response = phba->ctrl.mcc_numtag[tag];
- status = (mcc_tag_response & CQE_STATUS_MASK);
- addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
+ rc = 0;
+ mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
+ status = (mcc_tag_status & CQE_STATUS_MASK);
+ addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
CQE_STATUS_ADDL_SHIFT);
if (mbx_cmd_mem) {
mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
} else {
- wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
+ wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
CQE_STATUS_WRB_SHIFT;
temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
mbx_hdr = embedded_payload(temp_wrb);
@@ -231,7 +298,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
mbx_hdr->subsystem,
mbx_hdr->opcode,
status, addl_status);
-
+ rc = -EIO;
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
beiscsi_log(phba, KERN_WARNING,
@@ -241,70 +308,16 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
"Resp_Len : %d Actual_Resp_Len : %d\n",
mbx_resp_hdr->response_length,
mbx_resp_hdr->actual_resp_len);
-
rc = -EAGAIN;
- goto release_mcc_tag;
}
- rc = -EIO;
}
-release_mcc_tag:
- /* Release the MCC entry */
- free_mcc_tag(&phba->ctrl, tag);
-
+ free_mcc_wrb(&phba->ctrl, tag);
return rc;
}
-void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
-{
- spin_lock(&ctrl->mbox_lock);
- tag = tag & 0x000000FF;
- ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
- if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
- ctrl->mcc_free_index = 0;
- else
- ctrl->mcc_free_index++;
- ctrl->mcc_tag_available++;
- spin_unlock(&ctrl->mbox_lock);
-}
-
-bool is_link_state_evt(u32 trailer)
-{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE);
-}
-
-static bool is_iscsi_evt(u32 trailer)
-{
- return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_ISCSI;
-}
-
-static int iscsi_evt_type(u32 trailer)
-{
- return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
-}
-
-static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
-{
- if (compl->flags != 0) {
- compl->flags = le32_to_cpu(compl->flags);
- WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
- return true;
- } else
- return false;
-}
-
-static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
-{
- compl->flags = 0;
-}
-
/*
- * be_mcc_compl_process()- Check the MBX comapletion status
+ * beiscsi_process_mbox_compl()- Check the MBX completion status
* @ctrl: Function specific MBX data structure
* @compl: Completion status of MBX Command
*
@@ -314,8 +327,8 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
* Success: Zero
* Failure: Non-Zero
**/
-static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
- struct be_mcc_compl *compl)
+static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -323,206 +336,228 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
struct be_cmd_resp_hdr *resp_hdr;
- be_dws_le_to_cpu(compl, 4);
-
- compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- if (compl_status != MCC_STATUS_SUCCESS) {
- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
-
+ /**
+ * To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new
+ * entry is little endian)
+ */
+ if (!compl->flags) {
beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : error in cmd completion: "
- "Subsystem : %d Opcode : %d "
- "status(compl/extd)=%d/%d\n",
- hdr->subsystem, hdr->opcode,
- compl_status, extd_status);
-
- if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
- resp_hdr = (struct be_cmd_resp_hdr *) hdr;
- if (resp_hdr->response_length)
- return 0;
- }
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : BMBX busy, no completion\n");
return -EBUSY;
}
- return 0;
-}
-
-int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
- struct be_mcc_compl *compl)
-{
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- u16 compl_status, extd_status;
- unsigned short tag;
+ compl->flags = le32_to_cpu(compl->flags);
+ WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ /**
+ * Just swap the status to host endian;
+ * mcc tag is opaquely copied from mcc_wrb.
+ */
be_dws_le_to_cpu(compl, 4);
-
compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- /* The ctrl.mcc_numtag[tag] is filled with
- * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
- * [7:0] = compl_status
- */
- tag = (compl->tag0 & 0x000000FF);
+ CQE_STATUS_COMPL_MASK;
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
+ CQE_STATUS_EXTD_MASK;
+ /* Need to reset the entire word that houses the valid bit */
+ compl->flags = 0;
- ctrl->mcc_numtag[tag] = 0x80000000;
- ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
- ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
- ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
+ if (compl_status == MCC_STATUS_SUCCESS)
+ return 0;
- if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
- wake_up_interruptible(&ctrl->mcc_wait[tag]);
- } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
- struct be_dma_mem *tag_mem;
- tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
+ hdr->subsystem, hdr->opcode, compl_status, extd_status);
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
- BEISCSI_LOG_CONFIG,
- "BC_%d : MBX Completion for timeout Command "
- "from FW\n");
- /* Check if memory needs to be freed */
- if (tag_mem->size)
- pci_free_consistent(ctrl->pdev, tag_mem->size,
- tag_mem->va, tag_mem->dma);
-
- /* Change tag state */
- spin_lock(&phba->ctrl.mbox_lock);
- ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
- spin_unlock(&phba->ctrl.mbox_lock);
-
- /* Free MCC Tag */
- free_mcc_tag(ctrl, tag);
+ if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+ /* if status is insufficient buffer, check the length */
+ resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+ if (resp_hdr->response_length)
+ return 0;
}
-
- return 0;
+ return -EINVAL;
}
-static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
+static void beiscsi_process_async_link(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl)
{
- struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
- struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+ struct be_async_event_link_state *evt;
+
+ evt = (struct be_async_event_link_state *)compl;
- if (be_mcc_compl_is_new(compl)) {
- queue_tail_inc(mcc_cq);
- return compl;
+ phba->port_speed = evt->port_speed;
+ /**
+ * Check logical link status in ASYNC event.
+ * This has been newly introduced in SKH-R Firmware 10.0.338.45.
+ **/
+ if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
+ phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+ phba->get_boot = BE_GET_BOOT_RETRIES;
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : Link Up on Port %d tag 0x%x\n",
+ evt->physical_port, evt->event_tag);
+ } else {
+ phba->state = BE_ADAPTER_LINK_DOWN;
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : Link Down on Port %d tag 0x%x\n",
+ evt->physical_port, evt->event_tag);
+ iscsi_host_for_each_session(phba->shost,
+ beiscsi_fail_session);
}
- return NULL;
}
-/**
- * be2iscsi_fail_session(): Closing session with appropriate error
- * @cls_session: ptr to session
- *
- * Depending on adapter state appropriate error flag is passed.
- **/
-void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+static char *beiscsi_port_misconf_event_msg[] = {
+ "Physical Link is functional.",
+ "Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
+ "Optics of two types installed - Remove one optic or install matching pair of optics.",
+ "Incompatible optics - Replace with compatible optics for card to function.",
+ "Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
+ "Uncertified optics - Replace with Avago Certified optics to enable link operation."
+};
+
+static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct beiscsi_hba *phba = iscsi_host_priv(shost);
- uint32_t iscsi_err_flag;
+ struct be_async_event_sli *async_sli;
+ u8 evt_type, state, old_state, le;
+ char *sev = KERN_WARNING;
+ char *msg = NULL;
+
+ evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
+ evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;
+
+ /* processing only MISCONFIGURED physical port event */
+ if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
+ return;
+
+ async_sli = (struct be_async_event_sli *)compl;
+ state = async_sli->event_data1 >>
+ (phba->fw_config.phys_port * 8) & 0xff;
+ le = async_sli->event_data2 >>
+ (phba->fw_config.phys_port * 8) & 0xff;
+
+ old_state = phba->optic_state;
+ phba->optic_state = state;
+
+ if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
+ /* fw is reporting a state we don't know, log and return */
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
+ phba->port_name, async_sli->event_data1);
+ return;
+ }
- if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
- iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
- else
- iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+ if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
+ /* log link effect for unqualified-4, uncertified-5 optics */
+ if (state > 3)
+ msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
+ " Link is non-operational." :
+ " Link is operational.";
+ /* 1 - info */
+ if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
+ sev = KERN_INFO;
+ /* 2 - error */
+ if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
+ sev = KERN_ERR;
+ }
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+ if (old_state != phba->optic_state)
+ __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
+ phba->port_name,
+ beiscsi_port_misconf_event_msg[state],
+ !msg ? "" : msg);
}
-void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
- struct be_async_event_link_state *evt)
+void beiscsi_process_async_event(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl)
{
- if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
- ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
- (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
- phba->state = BE_ADAPTER_LINK_DOWN;
-
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
- "BC_%d : Link Down on Port %d\n",
- evt->physical_port);
-
- iscsi_host_for_each_session(phba->shost,
- be2iscsi_fail_session);
- } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
- ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
- (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
- phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+ char *sev = KERN_INFO;
+ u8 evt_code;
+
+ /* interpret flags as an async trailer */
+ evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
+ evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
+ switch (evt_code) {
+ case ASYNC_EVENT_CODE_LINK_STATE:
+ beiscsi_process_async_link(phba, compl);
+ break;
+ case ASYNC_EVENT_CODE_ISCSI:
+ phba->state |= BE_ADAPTER_CHECK_BOOT;
phba->get_boot = BE_GET_BOOT_RETRIES;
-
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
- "BC_%d : Link UP on Port %d\n",
- evt->physical_port);
+ sev = KERN_ERR;
+ break;
+ case ASYNC_EVENT_CODE_SLI:
+ beiscsi_process_async_sli(phba, compl);
+ break;
+ default:
+ /* event not registered */
+ sev = KERN_ERR;
}
+
+ beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
+ evt_code, compl->status, compl->flags);
}
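
/*
 * Sketch of the async-trailer decode used by
 * beiscsi_process_async_event()/beiscsi_process_async_sli() above: the
 * event code and event type are byte fields of the completion flags
 * word. ASYNC_TRAILER_EVENT_CODE_SHIFT is assumed to be 8 here (only
 * the masks and the type shift are visible in this patch).
 */
#include <stdint.h>
#include <stdio.h>

#define ASYNC_TRAILER_EVENT_CODE_SHIFT	8	/* assumed */
#define ASYNC_TRAILER_EVENT_CODE_MASK	0xFF
#define ASYNC_TRAILER_EVENT_TYPE_SHIFT	16
#define ASYNC_TRAILER_EVENT_TYPE_MASK	0xFF
#define ASYNC_EVENT_CODE_SLI			0x11
#define ASYNC_SLI_EVENT_TYPE_MISCONFIGURED	0x9

int main(void)
{
	/* flags carrying code 0x11 (SLI) and type 0x9 (misconfigured) */
	uint32_t flags = (0x9u << ASYNC_TRAILER_EVENT_TYPE_SHIFT) |
			 (0x11u << ASYNC_TRAILER_EVENT_CODE_SHIFT);
	uint8_t code = (flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;
	uint8_t type = (flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	if (code == ASYNC_EVENT_CODE_SLI &&
	    type == ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		printf("SLI misconfigured-port event\n");
	return 0;
}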
-int beiscsi_process_mcc(struct beiscsi_hba *phba)
+int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
{
- struct be_mcc_compl *compl;
- int num = 0, status = 0;
- struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ u16 compl_status, extd_status;
+ struct be_dma_mem *tag_mem;
+ unsigned int tag, wrb_idx;
- spin_lock_bh(&phba->ctrl.mcc_cq_lock);
- while ((compl = be_mcc_compl_get(phba))) {
- if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(compl->flags))
- /* Interpret compl as a async link evt */
- beiscsi_async_link_state_process(phba,
- (struct be_async_event_link_state *) compl);
- else if (is_iscsi_evt(compl->flags)) {
- switch (iscsi_evt_type(compl->flags)) {
- case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
- case ASYNC_EVENT_NEW_ISCSI_CONN:
- case ASYNC_EVENT_NEW_TCP_CONN:
- phba->state |= BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG |
- BEISCSI_LOG_MBOX,
- "BC_%d : Async iscsi Event,"
- " flags handled = 0x%08x\n",
- compl->flags);
- break;
- default:
- phba->state |= BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG |
- BEISCSI_LOG_MBOX,
- "BC_%d : Unsupported Async"
- " Event, flags = 0x%08x\n",
- compl->flags);
- }
- } else
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG |
- BEISCSI_LOG_MBOX,
- "BC_%d : Unsupported Async Event, flags"
- " = 0x%08x\n", compl->flags);
-
- } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- status = be_mcc_compl_process(ctrl, compl);
- atomic_dec(&phba->ctrl.mcc_obj.q.used);
- }
- be_mcc_compl_use(compl);
- num++;
+ be_dws_le_to_cpu(compl, 4);
+ tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
+ wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;
+
+ if (!test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX cmd completed but not posted\n");
+ return 0;
}
- if (num)
- hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
+ if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Completion for timeout Command from FW\n");
+ /**
+ * Check the size before freeing the resource:
+ * PCI memory is allocated only for non-embedded cmds.
+ **/
+ tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+ if (tag_mem->size)
+ pci_free_consistent(ctrl->pdev, tag_mem->size,
+ tag_mem->va, tag_mem->dma);
+ free_mcc_wrb(ctrl, tag);
+ return 0;
+ }
- spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
- return status;
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ /* The ctrl.mcc_tag_status[tag] is filled with
+ * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
+ * [7:0] = compl_status
+ */
+ ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
+ ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
+ ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
+ CQE_STATUS_ADDL_MASK;
+ ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
+
+ /* write ordering is enforced by wake_up_interruptible */
+ clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ return 0;
}
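
/*
 * Companion sketch for the packing at the end of
 * beiscsi_process_mcc_compl(): valid bit, WRB index, extended status
 * and completion status folded into one u32 per the layout comment
 * above. CQE_VALID_MASK is assumed to be bit [31]; its literal value
 * is not visible in this patch.
 */
#include <stdint.h>
#include <assert.h>

#define CQE_VALID_MASK		0x80000000u	/* assumed: bit [31] */
#define CQE_STATUS_WRB_SHIFT	16
#define CQE_STATUS_ADDL_SHIFT	8
#define CQE_STATUS_ADDL_MASK	0xFF00
#define CQE_STATUS_MASK		0xFF

static uint32_t pack_tag_status(uint8_t wrb_idx, uint8_t extd_sts,
				uint8_t compl_sts)
{
	uint32_t v = CQE_VALID_MASK;

	v |= (uint32_t)wrb_idx << CQE_STATUS_WRB_SHIFT;
	v |= ((uint32_t)extd_sts << CQE_STATUS_ADDL_SHIFT) &
		CQE_STATUS_ADDL_MASK;
	v |= compl_sts & CQE_STATUS_MASK;
	return v;
}

int main(void)
{
	/* round-trips with the unpacking sketch shown earlier */
	assert(pack_tag_status(0x12, 0x34, 0x04) == 0x80123404u);
	return 0;
}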
/*
- * be_mcc_wait_compl()- Wait for MBX completion
+ * be_mcc_compl_poll()- Wait for MBX completion
* @phba: driver private structure
*
* Wait till no more pending mcc requests are present
@@ -532,50 +567,57 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
* Failure: Non-Zero
*
**/
-static int be_mcc_wait_compl(struct beiscsi_hba *phba)
+int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
{
- int i, status;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ int i;
+
+ if (!test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state)) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d: tag %u state not running\n", tag);
+ return 0;
+ }
for (i = 0; i < mcc_timeout; i++) {
if (beiscsi_error(phba))
return -EIO;
- status = beiscsi_process_mcc(phba);
- if (status)
- return status;
-
- if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
+ beiscsi_process_mcc_cq(phba);
+ /* after polling, wrb and tag need to be released */
+ if (!test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state)) {
+ free_mcc_wrb(ctrl, tag);
break;
+ }
udelay(100);
}
- if (i == mcc_timeout) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : FW Timed Out\n");
- phba->fw_timeout = true;
- beiscsi_ue_detect(phba);
- return -EBUSY;
- }
- return 0;
+
+ if (i < mcc_timeout)
+ return 0;
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
+ return -EBUSY;
}
-/*
- * be_mcc_notify_wait()- Notify and wait for Compl
- * @phba: driver private structure
- *
- * Notify MCC requests and wait for completion
- *
- * return
- * Success: 0
- * Failure: Non-Zero
- **/
-int be_mcc_notify_wait(struct beiscsi_hba *phba)
+void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
- be_mcc_notify(phba);
- return be_mcc_wait_compl(phba);
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ u32 val = 0;
+
+ set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+ /* make request available for DMA */
+ wmb();
+ iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
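
/*
 * Sketch of the MCC doorbell value composed in be_mcc_notify() above:
 * the queue id sits in the low bits and the number of newly posted
 * WRBs above it. The DB_MCCQ_* values below are assumptions; they are
 * defined outside this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define DB_MCCQ_RING_ID_MASK	 0xFFFF	/* assumed */
#define DB_MCCQ_NUM_POSTED_SHIFT 16	/* assumed */

int main(void)
{
	uint32_t mccq_id = 0x2a;
	uint32_t val = 0;

	val |= mccq_id & DB_MCCQ_RING_ID_MASK;
	val |= 1u << DB_MCCQ_NUM_POSTED_SHIFT;	/* one WRB posted */
	printf("doorbell=0x%08x\n", val);	/* 0x0001002a */
	return 0;
}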
/*
- * be_mbox_db_ready_wait()- Check ready status
+ * be_mbox_db_ready_poll()- Check ready status
* @ctrl: Function specific MBX data structure
*
* Check for the ready status of FW to send BMBX
@@ -585,49 +627,45 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
* Success: 0
* Failure: Non-Zero
**/
-static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
+static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
-#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
+ /* wait 30s for generic non-flash MBOX operation */
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
unsigned long timeout;
- bool read_flag = false;
- int ret = 0, i;
u32 ready;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
- if (beiscsi_error(phba))
- return -EIO;
+ /*
+ * This BMBX busy wait path is used during init only.
+ * Commands executed during init typically complete well within 5s;
+ * the 30s timeout above is a generous upper bound.
+ */
+ timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
+ do {
+ if (beiscsi_error(phba))
+ return -EIO;
- timeout = jiffies + (HZ * 110);
+ ready = ioread32(db);
+ if (ready == 0xffffffff)
+ return -EIO;
- do {
- for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
- if (ready) {
- read_flag = true;
- break;
- }
- mdelay(1);
- }
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
+ if (ready)
+ return 0;
- if (!read_flag) {
- wait_event_timeout(rdybit_check_q,
- (read_flag != true),
- HZ * 5);
- }
- } while ((time_before(jiffies, timeout)) && !read_flag);
+ if (time_after(jiffies, timeout))
+ break;
+ msleep(20);
+ } while (!ready);
- if (!read_flag) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : FW Timed Out\n");
- phba->fw_timeout = true;
- beiscsi_ue_detect(phba);
- ret = -EBUSY;
- }
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
+
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
- return ret;
+ return -EBUSY;
}
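
/*
 * Userspace model of the ready-bit poll rewritten above: read the
 * doorbell, bail out on all-ones (device gone), return once the ready
 * mask is set, otherwise sleep and retry until the deadline. The
 * MPU_MAILBOX_DB_RDY_MASK value is an assumption; the register read is
 * faked for the sake of a runnable example.
 */
#include <stdint.h>
#include <stdio.h>

#define MPU_MAILBOX_DB_RDY_MASK 0x1	/* assumed: ready is bit 0 */

static int polls;

static uint32_t fake_ioread32(void)
{
	/* pretend FW sets RDY on the third read */
	return (++polls >= 3) ? MPU_MAILBOX_DB_RDY_MASK : 0;
}

static int db_ready_poll(int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		uint32_t ready = fake_ioread32();

		if (ready == 0xffffffffu)
			return -1;	/* surprise removal */
		if (ready & MPU_MAILBOX_DB_RDY_MASK)
			return 0;	/* FW ready */
		/* msleep(20) in the driver */
	}
	return -2;	/* timed out */
}

int main(void)
{
	int rc = db_ready_poll(1500);

	printf("poll rc=%d after %d reads\n", rc, polls);
	return 0;
}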
/*
@@ -648,10 +686,8 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
struct be_mcc_mailbox *mbox = mbox_mem->va;
- struct be_mcc_compl *compl = &mbox->compl;
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- status = be_mbox_db_ready_wait(ctrl);
+ status = be_mbox_db_ready_poll(ctrl);
if (status)
return status;
@@ -660,7 +696,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
iowrite32(val, db);
- status = be_mbox_db_ready_wait(ctrl);
+ status = be_mbox_db_ready_poll(ctrl);
if (status)
return status;
@@ -670,81 +706,15 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
val |= (u32) (mbox_mem->dma >> 4) << 2;
iowrite32(val, db);
- status = be_mbox_db_ready_wait(ctrl);
+ status = be_mbox_db_ready_poll(ctrl);
if (status)
return status;
- if (be_mcc_compl_is_new(compl)) {
- status = be_mcc_compl_process(ctrl, &mbox->compl);
- be_mcc_compl_use(compl);
- if (status) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : After be_mcc_compl_process\n");
-
- return status;
- }
- } else {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : Invalid Mailbox Completion\n");
-
- return -EBUSY;
- }
- return 0;
-}
-
-/*
- * Insert the mailbox address into the doorbell in two steps
- * Polls on the mbox doorbell till a command completion (or a timeout) occurs
- */
-static int be_mbox_notify_wait(struct beiscsi_hba *phba)
-{
- int status;
- u32 val = 0;
- void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
- struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
- struct be_mcc_mailbox *mbox = mbox_mem->va;
- struct be_mcc_compl *compl = &mbox->compl;
- struct be_ctrl_info *ctrl = &phba->ctrl;
-
- status = be_mbox_db_ready_wait(ctrl);
- if (status)
- return status;
-
- val |= MPU_MAILBOX_DB_HI_MASK;
- /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
- val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
- iowrite32(val, db);
-
- /* wait for ready to be set */
- status = be_mbox_db_ready_wait(ctrl);
- if (status != 0)
- return status;
-
- val = 0;
- /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
- val |= (u32)(mbox_mem->dma >> 4) << 2;
- iowrite32(val, db);
+ /* RDY is set; small delay before CQE read. */
+ udelay(1);
- status = be_mbox_db_ready_wait(ctrl);
- if (status != 0)
- return status;
-
- /* A cq entry has been made now */
- if (be_mcc_compl_is_new(compl)) {
- status = be_mcc_compl_process(ctrl, &mbox->compl);
- be_mcc_compl_use(compl);
- if (status)
- return status;
- } else {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : invalid mailbox completion\n");
-
- return -EBUSY;
- }
- return 0;
+ status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
+ return status;
}
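
/*
 * Sketch of the two doorbell writes in be_mbox_notify() above: the
 * 16-byte-aligned mailbox DMA address goes out in two MMIO writes,
 * high half first (address bits 34-63 at doorbell bits 2-31, HI flag
 * set), then the low half (address bits 4-33 at bits 2-31). The HI
 * mask value is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

#define MPU_MAILBOX_DB_HI_MASK 0x2	/* assumed: bit 1 selects hi word */

int main(void)
{
	uint64_t dma = 0x123456789abcdef0ull;	/* 16-byte aligned */
	uint32_t hi = MPU_MAILBOX_DB_HI_MASK |
		      (((uint32_t)(dma >> 32) >> 2) << 2);
	uint32_t lo = ((uint32_t)(dma >> 4)) << 2;

	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}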
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
@@ -809,21 +779,6 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
-struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
-{
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- struct be_mcc_wrb *wrb;
-
- WARN_ON(atomic_read(&mccq->used) >= mccq->len);
- wrb = queue_head_node(mccq);
- memset(wrb, 0, sizeof(*wrb));
- wrb->tag0 = (mccq->head & 0x000000FF) << 16;
- queue_head_inc(mccq);
- atomic_inc(&mccq->used);
- return wrb;
-}
-
-
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay)
{
@@ -833,7 +788,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -860,7 +815,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
eq->id = le16_to_cpu(resp->eq_id);
eq->created = true;
}
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -881,7 +836,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
int status;
u8 *endian_check;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
endian_check = (u8 *) wrb;
@@ -900,7 +855,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : be_cmd_fw_initialize Failed\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -921,7 +876,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
int status;
u8 *endian_check;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
endian_check = (u8 *) wrb;
@@ -941,7 +896,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : be_cmd_fw_uninit Failed\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -957,7 +912,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1007,7 +962,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
"BC_%d : In be_cmd_cq_create, status=ox%08x\n",
status);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1025,13 +980,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_req_mcc_create *req;
+ struct be_cmd_req_mcc_create_ext *req;
struct be_dma_mem *q_mem = &mccq->dma_mem;
struct be_ctrl_info *ctrl;
void *ctxt;
int status;
- spin_lock(&phba->ctrl.mbox_lock);
+ mutex_lock(&phba->ctrl.mbox_lock);
ctrl = &phba->ctrl;
wrb = wrb_from_mbox(&ctrl->mbox_mem);
memset(wrb, 0, sizeof(*wrb));
@@ -1041,9 +996,12 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
+ req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
+ req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
PCI_FUNC(phba->pcidev->devfn));
@@ -1056,13 +1014,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
- status = be_mbox_notify_wait(phba);
+ status = be_mbox_notify(ctrl);
if (!status) {
struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
mccq->id = le16_to_cpu(resp->id);
mccq->created = true;
}
- spin_unlock(&phba->ctrl.mbox_lock);
+ mutex_unlock(&phba->ctrl.mbox_lock);
return status;
}
@@ -1080,7 +1038,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
"BC_%d : In beiscsi_cmd_q_destroy "
"queue_type : %d\n", queue_type);
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1110,7 +1068,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
break;
default:
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
BUG();
return -ENXIO;
}
@@ -1120,7 +1078,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1155,7 +1113,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1227,7 +1185,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
defq_ring->doorbell_offset = resp->doorbell_offset;
}
}
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1255,7 +1213,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1286,7 +1244,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
pwrb_context->doorbell_offset = resp->doorbell_offset;
}
}
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1297,7 +1255,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
struct be_post_template_pages_req *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1310,7 +1268,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1320,7 +1278,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
struct be_remove_template_pages_req *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1331,7 +1289,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1350,7 +1308,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
if (num_pages == 0xff)
num_pages = 1;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
do {
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1379,7 +1337,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
}
} while (num_pages > 0);
error:
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
if (status != 0)
beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
return status;
@@ -1392,15 +1350,15 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
struct be_post_sgl_pages_req *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
- status = be_mbox_notify_wait(phba);
+ status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1417,21 +1375,20 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
int be_cmd_set_vlan(struct beiscsi_hba *phba,
uint16_t vlan_tag)
{
- unsigned int tag = 0;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_set_vlan_req *req;
struct be_ctrl_info *ctrl = &phba->ctrl;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
@@ -1440,8 +1397,8 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba,
req->interface_hndl = phba->interface_handle;
req->vlan_priority = vlan_tag;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 4bfca355fbe4..deeb951e6874 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -58,15 +58,16 @@ struct be_mcc_wrb {
#define MCC_STATUS_ILLEGAL_FIELD 0x3
#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
-#define CQE_STATUS_COMPL_MASK 0xFFFF
-#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK 0xFFFF
-#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
+#define CQE_STATUS_COMPL_MASK 0xFFFF
+#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
+#define CQE_STATUS_EXTD_MASK 0xFFFF
+#define CQE_STATUS_EXTD_SHIFT 16 /* bits 31 - 16 */
#define CQE_STATUS_ADDL_MASK 0xFF00
-#define CQE_STATUS_MASK 0xFF
-#define CQE_STATUS_ADDL_SHIFT 0x08
+#define CQE_STATUS_ADDL_SHIFT 8
+#define CQE_STATUS_MASK 0xFF
#define CQE_STATUS_WRB_MASK 0xFF0000
#define CQE_STATUS_WRB_SHIFT 16
+
#define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000)
#define BEISCSI_FW_MBX_TIMEOUT 100
@@ -119,13 +120,22 @@ struct be_mcc_compl {
#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
#define ASYNC_EVENT_CODE_LINK_STATE 0x1
#define ASYNC_EVENT_CODE_ISCSI 0x4
+#define ASYNC_EVENT_CODE_SLI 0x11
#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
-#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF
+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
+
+/* iSCSI events */
#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4
#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5
#define ASYNC_EVENT_NEW_TCP_CONN 0x7
+/* SLI events */
+#define ASYNC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
+#define ASYNC_SLI_LINK_EFFECT_VALID(le) (le & 0x80)
+#define ASYNC_SLI_LINK_EFFECT_SEV(le) ((le >> 1) & 0x03)
+#define ASYNC_SLI_LINK_EFFECT_STATE(le) (le & 0x01)
+
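
/*
 * Example decode of the link-effect byte using the three macros above:
 * bit 7 = valid, bits [2:1] = severity (1 info, 2 error), bit 0 set
 * means the link is non-operational. Standalone sketch, not driver
 * code; the macros are restated locally under different names.
 */
#include <stdint.h>
#include <stdio.h>

#define SLI_LINK_EFFECT_VALID(le)	(le & 0x80)
#define SLI_LINK_EFFECT_SEV(le)		((le >> 1) & 0x03)
#define SLI_LINK_EFFECT_STATE(le)	(le & 0x01)

int main(void)
{
	uint8_t le = 0x85;	/* valid, severity 2 (error), link down */

	if (SLI_LINK_EFFECT_VALID(le))
		printf("sev=%u link %s\n", SLI_LINK_EFFECT_SEV(le),
		       SLI_LINK_EFFECT_STATE(le) ?
		       "non-operational" : "operational");
	return 0;
}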
struct be_async_event_trailer {
u32 code;
};
@@ -133,7 +143,6 @@ struct be_async_event_trailer {
enum {
ASYNC_EVENT_LINK_DOWN = 0x0,
ASYNC_EVENT_LINK_UP = 0x1,
- ASYNC_EVENT_LOGICAL = 0x2
};
/**
@@ -143,16 +152,39 @@ enum {
struct be_async_event_link_state {
u8 physical_port;
u8 port_link_status;
+/**
+ * ASYNC_EVENT_LINK_DOWN 0x0
+ * ASYNC_EVENT_LINK_UP 0x1
+ * ASYNC_EVENT_LINK_LOGICAL_DOWN 0x2
+ * ASYNC_EVENT_LINK_LOGICAL_UP 0x3
+ */
+#define BE_ASYNC_LINK_UP_MASK 0x01
u8 port_duplex;
u8 port_speed;
-#define BEISCSI_PHY_LINK_FAULT_NONE 0x00
-#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01
-#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02
+/* BE2ISCSI_LINK_SPEED_ZERO 0x00 - no link */
+#define BE2ISCSI_LINK_SPEED_10MBPS 0x01
+#define BE2ISCSI_LINK_SPEED_100MBPS 0x02
+#define BE2ISCSI_LINK_SPEED_1GBPS 0x03
+#define BE2ISCSI_LINK_SPEED_10GBPS 0x04
+#define BE2ISCSI_LINK_SPEED_25GBPS 0x06
+#define BE2ISCSI_LINK_SPEED_40GBPS 0x07
u8 port_fault;
- u8 rsvd0[7];
+ u8 event_reason;
+ u16 qos_link_speed;
+ u32 event_tag;
struct be_async_event_trailer trailer;
} __packed;
+/**
+ * When the async-trailer carries an SLI event, mcc_compl is
+ * interpreted as:
+ */
+struct be_async_event_sli {
+ u32 event_data1;
+ u32 event_data2;
+ u32 reserved;
+ u32 trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -172,6 +204,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_MCC_CREATE_EXT 90
#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24
#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
@@ -183,6 +216,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_EQ_DESTROY 55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
#define OPCODE_COMMON_FUNCTION_RESET 61
+#define OPCODE_COMMON_GET_PORT_NAME 77
/**
* LIST of opcodes that are common between Initiator and Target
@@ -587,10 +621,11 @@ struct amap_mcc_context {
u8 rsvd2[32];
} __packed;
-struct be_cmd_req_mcc_create {
+struct be_cmd_req_mcc_create_ext {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 rsvd0;
+ u32 async_evt_bitmap;
u8 context[sizeof(struct amap_mcc_context) / 8];
struct phys_addr pages[8];
} __packed;
@@ -653,20 +688,6 @@ struct be_cmd_req_modify_eq_delay {
/******************** Get MAC ADDR *******************/
-#define ETH_ALEN 6
-
-struct be_cmd_get_nic_conf_req {
- struct be_cmd_req_hdr hdr;
- u32 nic_port_count;
- u32 speed;
- u32 max_speed;
- u32 link_state;
- u32 max_frame_size;
- u16 size_of_structure;
- u8 mac_address[ETH_ALEN];
- u32 rsvd[23];
-};
-
struct be_cmd_get_nic_conf_resp {
struct be_cmd_resp_hdr hdr;
u32 nic_port_count;
@@ -675,9 +696,8 @@ struct be_cmd_get_nic_conf_resp {
u32 link_state;
u32 max_frame_size;
u16 size_of_structure;
- u8 mac_address[6];
- u32 rsvd[23];
-};
+ u8 mac_address[ETH_ALEN];
+} __packed;
#define BEISCSI_ALIAS_LEN 32
@@ -689,29 +709,6 @@ struct be_cmd_hba_name {
u8 initiator_alias[BEISCSI_ALIAS_LEN];
} __packed;
-struct be_cmd_ntwk_link_status_req {
- struct be_cmd_req_hdr hdr;
- u32 rsvd0;
-} __packed;
-
-/*** Port Speed Values ***/
-#define BE2ISCSI_LINK_SPEED_ZERO 0x00
-#define BE2ISCSI_LINK_SPEED_10MBPS 0x01
-#define BE2ISCSI_LINK_SPEED_100MBPS 0x02
-#define BE2ISCSI_LINK_SPEED_1GBPS 0x03
-#define BE2ISCSI_LINK_SPEED_10GBPS 0x04
-struct be_cmd_ntwk_link_status_resp {
- struct be_cmd_resp_hdr hdr;
- u8 phys_port;
- u8 mac_duplex;
- u8 mac_speed;
- u8 mac_fault;
- u8 mgmt_mac_duplex;
- u8 mgmt_mac_speed;
- u16 qos_link_speed;
- u32 logical_link_speed;
-} __packed;
-
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay);
@@ -730,28 +727,28 @@ int be_poll_mcc(struct be_ctrl_info *ctrl);
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
-unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
-void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
int num);
-int beiscsi_mccq_compl(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb,
- struct be_dma_mem *mbx_cmd_mem);
+int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem);
/* iSCSI Functions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
-struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
-int be_mcc_notify_wait(struct beiscsi_hba *phba);
-void be_mcc_notify(struct beiscsi_hba *phba);
-unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
-void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
- struct be_async_event_link_state *evt);
-int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
- struct be_mcc_compl *compl);
+int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag);
+void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag);
+struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
+ unsigned int *ref_tag);
+void beiscsi_process_async_event(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl);
+int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl);
+
int be_mbox_notify(struct be_ctrl_info *ctrl);
@@ -777,8 +774,6 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct hwi_wrb_context *pwrb_context,
uint8_t ulp_num);
-bool is_link_state_evt(u32 trailer);
-
/* Configuration Functions */
int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
@@ -1137,6 +1132,21 @@ struct be_cmd_get_all_if_id_req {
u32 if_hndl_list[1];
} __packed;
+struct be_cmd_get_port_name {
+ union {
+ struct be_cmd_req_hdr req_hdr;
+ struct be_cmd_resp_hdr resp_hdr;
+ } h;
+ union {
+ struct {
+ u32 reserved;
+ } req;
+ struct {
+ u32 port_names;
+ } resp;
+ } p;
+} __packed;
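
/*
 * The GET_PORT_NAME response packs one name character per physical
 * port into resp.port_names. A sketch of the presumed per-port
 * extraction, mirroring the per-port byte indexing used for the SLI
 * event_data1/event_data2 words earlier in this patch (the actual
 * consumer is outside these hunks):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t port_names = 0x33323130;	/* '3','2','1','0' */
	unsigned int phys_port = 2;
	char name = (port_names >> (phys_port * 8)) & 0xFF;

	printf("port name: %c\n", name);	/* prints '2' */
	return 0;
}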
+
#define ISCSI_OPCODE_SCSI_DATA_OUT 5
#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
@@ -1367,5 +1377,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
u8 subsystem, u8 opcode, int cmd_len);
-void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
+void beiscsi_fail_session(struct iscsi_cls_session *cls_session);
#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 022e87b62e40..09f89a3eaa87 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -367,13 +367,14 @@ beiscsi_set_vlan_tag(struct Scsi_Host *shost,
struct iscsi_iface_param_info *iface_param)
{
struct beiscsi_hba *phba = iscsi_host_priv(shost);
- int ret = 0;
+ int ret;
/* Get the Interface Handle */
- if (mgmt_get_all_if_id(phba)) {
+ ret = mgmt_get_all_if_id(phba);
+ if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Getting Interface Handle Failed\n");
- return -EIO;
+ return ret;
}
switch (iface_param->param) {
@@ -465,6 +466,10 @@ beiscsi_set_ipv6(struct Scsi_Host *shost,
ret = mgmt_set_ip(phba, iface_param, NULL,
ISCSI_BOOTPROTO_STATIC);
break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ ret = beiscsi_set_vlan_tag(shost, iface_param);
+ break;
default:
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Param %d not supported\n",
@@ -730,7 +735,7 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
return -EBUSY;
}
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -753,7 +758,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
+ ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ?
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
}
@@ -761,34 +766,13 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
* beiscsi_get_port_speed - Get the Port Speed from Adapter
* @shost : pointer to scsi_host structure
*
- * returns Success/Failure
*/
-static int beiscsi_get_port_speed(struct Scsi_Host *shost)
+static void beiscsi_get_port_speed(struct Scsi_Host *shost)
{
- int rc;
- unsigned int tag;
- struct be_mcc_wrb *wrb;
- struct be_cmd_ntwk_link_status_resp *resp;
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- tag = be_cmd_get_port_speed(phba);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Getting Port Speed Failed\n");
-
- return -EBUSY;
- }
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
- if (rc) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : Port Speed MBX Failed\n");
- return rc;
- }
- resp = embedded_payload(wrb);
-
- switch (resp->mac_speed) {
+ switch (phba->port_speed) {
case BE2ISCSI_LINK_SPEED_10MBPS:
ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
break;
@@ -801,10 +785,15 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
case BE2ISCSI_LINK_SPEED_10GBPS:
ihost->port_speed = ISCSI_PORT_SPEED_10GBPS;
break;
+ case BE2ISCSI_LINK_SPEED_25GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_25GBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_40GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_40GBPS;
+ break;
default:
ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN;
}
- return 0;
}
/**
@@ -854,12 +843,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
break;
case ISCSI_HOST_PARAM_PORT_SPEED:
- status = beiscsi_get_port_speed(shost);
- if (status) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Retreiving Port Speed Failed\n");
- return status;
- }
+ beiscsi_get_port_speed(shost);
status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
break;
default:
@@ -1159,7 +1143,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
return -EAGAIN;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1293,7 +1277,7 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba)
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
irq_poll_disable(&pbe_eq->iopoll);
- beiscsi_process_cq(pbe_eq);
+ beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
irq_poll_enable(&pbe_eq->iopoll);
}
}
@@ -1318,7 +1302,7 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
ret = -EAGAIN;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
/* Flush the CQ entries */
beiscsi_flush_cq(phba);
@@ -1393,7 +1377,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_ep->ep_cid);
}
- beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
free_ep:
msleep(BEISCSI_LOGOUT_SYNC_DELAY);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index cb9072a841be..f05e7737107d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -29,6 +29,7 @@
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
+#include <linux/irq_poll.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
@@ -285,7 +286,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -366,7 +367,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
return FAILED;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -727,9 +728,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
- spin_lock_init(&ctrl->mbox_lock);
+ mutex_init(&ctrl->mbox_lock);
spin_lock_init(&phba->ctrl.mcc_lock);
- spin_lock_init(&phba->ctrl.mcc_cq_lock);
return status;
}
@@ -895,31 +895,17 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
struct beiscsi_hba *phba;
- struct be_eq_entry *eqe = NULL;
struct be_queue_info *eq;
- struct be_queue_info *cq;
- unsigned int num_eq_processed;
struct be_eq_obj *pbe_eq;
pbe_eq = dev_id;
eq = &pbe_eq->q;
- cq = pbe_eq->cq;
- eqe = queue_tail_node(eq);
phba = pbe_eq->phba;
- num_eq_processed = 0;
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- irq_poll_sched(&pbe_eq->iopoll);
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
-
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+ /* disable interrupt till iopoll completes */
+ hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
+ irq_poll_sched(&pbe_eq->iopoll);
return IRQ_HANDLED;
}
@@ -996,6 +982,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
return IRQ_NONE;
}
+
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
struct pci_dev *pcidev = phba->pcidev;
@@ -1070,7 +1057,7 @@ free_msix_irqs:
void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int id, unsigned int num_processed,
- unsigned char rearm, unsigned char event)
+ unsigned char rearm)
{
u32 val = 0;
@@ -1145,6 +1132,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ spin_lock_bh(&phba->io_sgl_lock);
if (phba->io_sgl_hndl_avbl) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In alloc_io_sgl_handle,"
@@ -1162,12 +1150,14 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
phba->io_sgl_alloc_index++;
} else
psgl_handle = NULL;
+ spin_unlock_bh(&phba->io_sgl_lock);
return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
+ spin_lock_bh(&phba->io_sgl_lock);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In free_,io_sgl_free_index=%d\n",
phba->io_sgl_free_index);
@@ -1182,6 +1172,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"value there=%p\n", phba->io_sgl_free_index,
phba->io_sgl_hndl_base
[phba->io_sgl_free_index]);
+ spin_unlock_bh(&phba->io_sgl_lock);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1190,6 +1181,25 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->io_sgl_free_index = 0;
else
phba->io_sgl_free_index++;
+ spin_unlock_bh(&phba->io_sgl_lock);
+}
+
+static inline struct wrb_handle *
+beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
+ unsigned int wrbs_per_cxn)
+{
+ struct wrb_handle *pwrb_handle;
+
+ spin_lock_bh(&pwrb_context->wrb_lock);
+ pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
+ pwrb_context->wrb_handles_available--;
+ if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
+ pwrb_context->alloc_index = 0;
+ else
+ pwrb_context->alloc_index++;
+ spin_unlock_bh(&pwrb_context->wrb_lock);
+
+ return pwrb_handle;
}
/**
@@ -1201,30 +1211,32 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
* This happens under session_lock until submission to chip
*/
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
- struct hwi_wrb_context **pcontext)
+ struct hwi_wrb_context **pcontext)
{
struct hwi_wrb_context *pwrb_context;
struct hwi_controller *phwi_ctrlr;
- struct wrb_handle *pwrb_handle;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
- if (pwrb_context->wrb_handles_available >= 2) {
- pwrb_handle = pwrb_context->pwrb_handle_base[
- pwrb_context->alloc_index];
- pwrb_context->wrb_handles_available--;
- if (pwrb_context->alloc_index ==
- (phba->params.wrbs_per_cxn - 1))
- pwrb_context->alloc_index = 0;
- else
- pwrb_context->alloc_index++;
+ /* return the context address */
+ *pcontext = pwrb_context;
+ return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
+}
- /* Return the context address */
- *pcontext = pwrb_context;
- } else
- pwrb_handle = NULL;
- return pwrb_handle;
+static inline void
+beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
+ struct wrb_handle *pwrb_handle,
+ unsigned int wrbs_per_cxn)
+{
+ spin_lock_bh(&pwrb_context->wrb_lock);
+ pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
+ pwrb_context->wrb_handles_available++;
+ if (pwrb_context->free_index == (wrbs_per_cxn - 1))
+ pwrb_context->free_index = 0;
+ else
+ pwrb_context->free_index++;
+ spin_unlock_bh(&pwrb_context->wrb_lock);
}
/**
@@ -1239,13 +1251,9 @@ static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
struct wrb_handle *pwrb_handle)
{
- pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
- pwrb_context->wrb_handles_available++;
- if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
- pwrb_context->free_index = 0;
- else
- pwrb_context->free_index++;
-
+ beiscsi_put_wrb_handle(pwrb_context,
+ pwrb_handle,
+ phba->params.wrbs_per_cxn);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
@@ -1258,6 +1266,7 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ spin_lock_bh(&phba->mgmt_sgl_lock);
if (phba->eh_sgl_hndl_avbl) {
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1275,13 +1284,14 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
phba->eh_sgl_alloc_index++;
} else
psgl_handle = NULL;
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
-
+ spin_lock_bh(&phba->mgmt_sgl_lock);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BM_%d : In free_mgmt_sgl_handle,"
"eh_sgl_free_index=%d\n",
@@ -1296,6 +1306,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"BM_%d : Double Free in eh SGL ,"
"eh_sgl_free_index=%d\n",
phba->eh_sgl_free_index);
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
return;
}
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1305,6 +1316,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->eh_sgl_free_index = 0;
else
phba->eh_sgl_free_index++;
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
}
static void
@@ -2029,7 +2041,7 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
phwi_ctrlr, cri_index));
}
-static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
+void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
struct be_queue_info *mcc_cq;
struct be_mcc_compl *mcc_compl;
@@ -2039,31 +2051,15 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
mcc_compl = queue_tail_node(mcc_cq);
mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
-
if (num_processed >= 32) {
hwi_ring_cq_db(phba, mcc_cq->id,
- num_processed, 0, 0);
+ num_processed, 0);
num_processed = 0;
}
if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(mcc_compl->flags))
- /* Interpret compl as a async link evt */
- beiscsi_async_link_state_process(phba,
- (struct be_async_event_link_state *) mcc_compl);
- else {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
- "BM_%d : Unsupported Async Event, flags"
- " = 0x%08x\n",
- mcc_compl->flags);
- if (phba->state & BE_ADAPTER_LINK_UP) {
- phba->state |= BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
- }
- }
+ beiscsi_process_async_event(phba, mcc_compl);
} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
- atomic_dec(&phba->ctrl.mcc_obj.q.used);
+ beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
}
mcc_compl->flags = 0;
@@ -2074,24 +2070,24 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
}
if (num_processed > 0)
- hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
-
+ hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}
/**
* beiscsi_process_cq()- Process the Completion Queue
* @pbe_eq: Event Q on which the Completion has come
+ * @budget: Max number of CQ entries to process
*
* return
* Number of Completion Entries processed.
**/
-unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
struct be_queue_info *cq;
struct sol_cqe *sol;
struct dmsg_cqe *dmsg;
+ unsigned int total = 0;
unsigned int num_processed = 0;
- unsigned int tot_nump = 0;
unsigned short code = 0, cid = 0;
uint16_t cri_index = 0;
struct beiscsi_conn *beiscsi_conn;
@@ -2142,12 +2138,12 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
- if (num_processed >= 32) {
- hwi_ring_cq_db(phba, cq->id,
- num_processed, 0, 0);
- tot_nump += num_processed;
+ /* replenish cq */
+ if (num_processed == 32) {
+ hwi_ring_cq_db(phba, cq->id, 32, 0);
num_processed = 0;
}
+ total++;
switch (code) {
case SOL_CMD_COMPLETE:
@@ -2192,7 +2188,13 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
"BM_%d : Ignoring %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
break;
+ case CXN_KILLED_HDR_DIGEST_ERR:
case SOL_CMD_KILLED_DATA_DIGEST_ERR:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
+ break;
case CMD_KILLED_INVALID_STATSN_RCVD:
case CMD_KILLED_INVALID_R2T_RCVD:
case CMD_CXN_KILLED_LUN_INVALID:
@@ -2218,7 +2220,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
case CXN_KILLED_BURST_LEN_MISMATCH:
case CXN_KILLED_AHS_RCVD:
- case CXN_KILLED_HDR_DIGEST_ERR:
case CXN_KILLED_UNKNOWN_HDR:
case CXN_KILLED_STALE_ITT_TTT_RCVD:
case CXN_KILLED_INVALID_ITT_TTT_RCVD:
@@ -2253,13 +2254,12 @@ proc_next_cqe:
queue_tail_inc(cq);
sol = queue_tail_node(cq);
num_processed++;
+ if (total == budget)
+ break;
}
- if (num_processed > 0) {
- tot_nump += num_processed;
- hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
- }
- return tot_nump;
+ hwi_ring_cq_db(phba, cq->id, num_processed, 1);
+ return total;
}
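
/*
 * Minimal model of the budget accounting added to beiscsi_process_cq():
 * the CQ doorbell is replenished (no rearm) every 32 entries, while
 * 'total' enforces the irq_poll budget independently of the doorbell
 * count. Queue internals are faked with a plain counter.
 */
#include <stdio.h>

static int process_cq(int pending, int budget)
{
	int num_processed = 0, total = 0;

	while (pending-- > 0) {
		if (num_processed == 32) {
			/* hwi_ring_cq_db(..., 32, 0): replenish, no rearm */
			num_processed = 0;
		}
		total++;
		/* ... handle one completion entry here ... */
		num_processed++;
		if (total == budget)
			break;
	}
	/* hwi_ring_cq_db(..., num_processed, 1): final ring with rearm */
	return total;
}

int main(void)
{
	printf("processed %d of 100 (budget 64)\n", process_cq(100, 64));
	return 0;
}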
void beiscsi_process_all_cqs(struct work_struct *work)
@@ -2279,14 +2279,14 @@ void beiscsi_process_all_cqs(struct work_struct *work)
spin_lock_irqsave(&phba->isr_lock, flags);
pbe_eq->todo_mcc_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
- beiscsi_process_mcc_isr(phba);
+ beiscsi_process_mcc_cq(phba);
}
if (pbe_eq->todo_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
pbe_eq->todo_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
- beiscsi_process_cq(pbe_eq);
+ beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
}
/* rearm EQ for further interrupts */
@@ -2295,20 +2295,36 @@ void beiscsi_process_all_cqs(struct work_struct *work)
static int be_iopoll(struct irq_poll *iop, int budget)
{
- unsigned int ret;
+ unsigned int ret, num_eq_processed;
struct beiscsi_hba *phba;
struct be_eq_obj *pbe_eq;
+ struct be_eq_entry *eqe = NULL;
+ struct be_queue_info *eq;
+ num_eq_processed = 0;
pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
- ret = beiscsi_process_cq(pbe_eq);
+ phba = pbe_eq->phba;
+ eq = &pbe_eq->q;
+ eqe = queue_tail_node(eq);
+
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
+ EQE_VALID_MASK) {
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
+ }
+
+ hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+
+ ret = beiscsi_process_cq(pbe_eq, budget);
pbe_eq->cq_count += ret;
if (ret < budget) {
- phba = pbe_eq->phba;
irq_poll_complete(iop);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : rearm pbe_eq->q.id =%d\n",
- pbe_eq->q.id);
+ "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
+ pbe_eq->q.id, ret);
hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
return ret;
@@ -2502,7 +2518,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
* @pwrb: ptr to the WRB entry
* @task: iscsi task which is to be executed
**/
-static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
+static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
struct iscsi_sge *psgl;
struct beiscsi_io_task *io_task = task->dd_data;
@@ -2534,6 +2550,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
task->data,
task->data_count,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(phba->pcidev,
+ io_task->mtask_addr))
+ return -ENOMEM;
io_task->mtask_data_count = task->data_count;
} else
io_task->mtask_addr = 0;
@@ -2578,6 +2597,7 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
}
AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+ return 0;
}
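
hwi_write_buffer() now returns int so a failed DMA mapping can be propagated instead of handing a bad bus address to the hardware. The check follows the standard pattern for the legacy PCI DMA API used in this driver; a sketch, with 'dev' and 'buf' as placeholders:

/* Mapping-error check pattern (legacy PCI DMA API). */
dma_addr_t addr;

addr = pci_map_single(dev, buf, len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(dev, addr))
	return -ENOMEM;    /* caller must unwind, never post the WRB */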
/**
@@ -2706,8 +2726,10 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
phba->params.cxns_per_ctrl,
GFP_KERNEL);
- if (!phwi_ctrlr->wrb_context)
+ if (!phwi_ctrlr->wrb_context) {
+ kfree(phba->phwi_ctrlr);
return -ENOMEM;
+ }
phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
GFP_KERNEL);
@@ -2904,6 +2926,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
}
num_cxn_wrbh--;
}
+ spin_lock_init(&pwrb_context->wrb_lock);
}
idx = 0;
for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
@@ -3866,6 +3889,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_context->min_eqd = 0;
phwi_context->cur_eqd = 0;
be_cmd_fw_initialize(&phba->ctrl);
+ /* set optic state to unknown */
+ phba->optic_state = 0xff;
status = beiscsi_create_eqs(phba, phwi_context);
if (status != 0) {
@@ -4384,7 +4409,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
goto boot_freemem;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -4468,6 +4493,7 @@ put_shost:
scsi_host_put(phba->shost);
free_kset:
iscsi_boot_destroy_kset(phba->boot_kset);
+ phba->boot_kset = NULL;
return -ENOMEM;
}
@@ -4607,11 +4633,9 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
}
if (io_task->psgl_handle) {
- spin_lock_bh(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba,
io_task->psgl_handle);
io_task->psgl_handle = NULL;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
}
if (io_task->mtask_addr) {
@@ -4657,9 +4681,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
}
if (io_task->psgl_handle) {
- spin_lock(&phba->io_sgl_lock);
free_io_sgl_handle(phba, io_task->psgl_handle);
- spin_unlock(&phba->io_sgl_lock);
io_task->psgl_handle = NULL;
}
@@ -4714,6 +4736,20 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
iowrite32(doorbell, phba->db_va +
beiscsi_conn->doorbell_offset);
+
+ /*
+ * There is no completion for CONTEXT_UPDATE. The completion of next
+ * WRB posted guarantees FW's processing and DMA'ing of it.
+ * Use beiscsi_put_wrb_handle to put it back in the pool, which ensures
+ * the WRB is not zeroed or reused until wrbs_per_cxn other handles have cycled.
+ */
+ beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
+ phba->params.wrbs_per_cxn);
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
+ pwrb_handle, pwrb_context->free_index,
+ pwrb_context->wrb_handles_available);
}
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
@@ -4761,9 +4797,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->pwrb_handle = NULL;
if (task->sc) {
- spin_lock(&phba->io_sgl_lock);
io_task->psgl_handle = alloc_io_sgl_handle(phba);
- spin_unlock(&phba->io_sgl_lock);
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4788,10 +4822,8 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
beiscsi_conn->task = task;
if (!beiscsi_conn->login_in_progress) {
- spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = (struct sgl_handle *)
alloc_mgmt_sgl_handle(phba);
- spin_unlock(&phba->mgmt_sgl_lock);
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
@@ -4830,9 +4862,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_conn->plogin_wrb_handle;
}
} else {
- spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
- spin_unlock(&phba->mgmt_sgl_lock);
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
@@ -4867,15 +4897,11 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
return 0;
free_io_hndls:
- spin_lock(&phba->io_sgl_lock);
free_io_sgl_handle(phba, io_task->psgl_handle);
- spin_unlock(&phba->io_sgl_lock);
goto free_hndls;
free_mgmt_hndls:
- spin_lock(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba, io_task->psgl_handle);
io_task->psgl_handle = NULL;
- spin_unlock(&phba->mgmt_sgl_lock);
free_hndls:
phwi_ctrlr = phba->phwi_ctrlr;
cri_index = BE_GET_CRI_FROM_CID(
@@ -4903,7 +4929,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
pwrb = io_task->pwrb_handle->pwrb;
- io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
io_task->bhs_len = sizeof(struct be_cmd_bhs);
if (writedir) {
@@ -4964,7 +4989,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
unsigned int doorbell = 0;
pwrb = io_task->pwrb_handle->pwrb;
- io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
io_task->bhs_len = sizeof(struct be_cmd_bhs);
if (writedir) {
@@ -5023,6 +5047,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
unsigned int doorbell = 0;
unsigned int cid;
unsigned int pwrb_typeoffset = 0;
+ int ret = 0;
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
@@ -5071,7 +5096,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
case ISCSI_OP_LOGIN:
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_NOOP_OUT:
if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
@@ -5091,19 +5116,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
dmsg, pwrb, 0);
}
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_TEXT:
ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_LOGOUT:
ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
default:
@@ -5114,6 +5139,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
return -EINVAL;
}
+ if (ret)
+ return ret;
+
/* Set the task type */
io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
@@ -5132,23 +5160,21 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct scsi_cmnd *sc = task->sc;
- struct beiscsi_hba *phba = NULL;
+ struct beiscsi_hba *phba;
struct scatterlist *sg;
int num_sg;
unsigned int writedir = 0, xferlen = 0;
- phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
+ if (!io_task->conn->login_in_progress)
+ task->hdr->exp_statsn = 0;
if (!sc)
return beiscsi_mtask(task);
io_task->scsi_cmnd = sc;
num_sg = scsi_dma_map(sc);
+ phba = io_task->conn->phba;
if (num_sg < 0) {
- struct iscsi_conn *conn = task->conn;
- struct beiscsi_hba *phba = NULL;
-
- phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
"BM_%d : scsi_dma_map Failed "
@@ -5211,12 +5237,13 @@ static int beiscsi_bsg_request(struct bsg_job *job)
rc = wait_event_interruptible_timeout(
phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag],
+ phba->ctrl.mcc_tag_status[tag],
msecs_to_jiffies(
BEISCSI_HOST_MBX_TIMEOUT));
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- free_mcc_tag(&phba->ctrl, tag);
+ extd_status = (phba->ctrl.mcc_tag_status[tag] &
+ CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
+ status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
+ free_mcc_wrb(&phba->ctrl, tag);
resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
@@ -5277,15 +5304,12 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
if (phba->msix_enabled) {
for (i = 0; i <= phba->num_cpus; i++) {
msix_vec = phba->msix_entries[i].vector;
- synchronize_irq(msix_vec);
free_irq(msix_vec, &phwi_context->be_eq[i]);
kfree(phba->msi_name[i]);
}
} else
- if (phba->pcidev->irq) {
- synchronize_irq(phba->pcidev->irq);
+ if (phba->pcidev->irq)
free_irq(phba->pcidev->irq, phba);
- }
pci_disable_msix(phba->pcidev);
cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
@@ -5313,7 +5337,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
static void beiscsi_remove(struct pci_dev *pcidev)
{
-
struct beiscsi_hba *phba = NULL;
phba = pci_get_drvdata(pcidev);
@@ -5323,9 +5346,9 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
beiscsi_destroy_def_ifaces(phba);
- beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
iscsi_boot_destroy_kset(phba->boot_kset);
iscsi_host_remove(phba->shost);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
pci_disable_pcie_error_reporting(pcidev);
@@ -5334,23 +5357,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
pci_disable_device(pcidev);
}
-static void beiscsi_shutdown(struct pci_dev *pcidev)
-{
-
- struct beiscsi_hba *phba = NULL;
-
- phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
- if (!phba) {
- dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
- return;
- }
-
- phba->state = BE_ADAPTER_STATE_SHUTDOWN;
- iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
- beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
- pci_disable_device(pcidev);
-}
-
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
{
int i, status;
@@ -5413,7 +5419,7 @@ static void be_eqd_update(struct beiscsi_hba *phba)
if (num) {
tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
if (tag)
- beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
}
}
@@ -5564,11 +5570,17 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
phba->shost->max_id = phba->params.cxns_per_ctrl;
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = hwi_init_controller(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_eeh_resume -"
+ "Failed to initialize beiscsi_hba.\n");
+ goto ret_err;
+ }
for (i = 0; i < MAX_MCC_CMD; i++) {
init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
phba->ctrl.mcc_tag[i] = i + 1;
- phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_status[i + 1] = 0;
phba->ctrl.mcc_tag_available++;
}
@@ -5670,6 +5682,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
goto hba_free;
}
+ /*
+ * FUNCTION_RESET should clean up any stale info in FW for this fn
+ */
ret = beiscsi_cmd_reset_function(phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5693,6 +5708,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
"BM_%d : Error getting fw config\n");
goto free_port;
}
+ mgmt_get_port_name(&phba->ctrl, phba);
+ beiscsi_get_params(phba);
if (enable_msix)
find_num_cpus(phba);
@@ -5710,7 +5727,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
}
phba->shost->max_id = phba->params.cxns_per_ctrl;
- beiscsi_get_params(phba);
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {
@@ -5723,7 +5739,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
for (i = 0; i < MAX_MCC_CMD; i++) {
init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
phba->ctrl.mcc_tag[i] = i + 1;
- phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_status[i + 1] = 0;
phba->ctrl.mcc_tag_available++;
memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
sizeof(struct be_dma_mem));
@@ -5857,7 +5873,6 @@ static struct pci_driver beiscsi_pci_driver = {
.name = DRV_NAME,
.probe = beiscsi_dev_probe,
.remove = beiscsi_remove,
- .shutdown = beiscsi_shutdown,
.id_table = beiscsi_pci_id_table,
.err_handler = &beiscsi_eeh_handlers
};
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 5c67c0732241..30a4606d9a3b 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.6.0.1"
+#define BUILD_STR "11.0.0.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -63,6 +63,7 @@
#define BE2_SGE 32
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
+#define BE2_MAX_NUM_CQ_PROC 512
#define MAX_CPUS 64
#define BEISCSI_MAX_NUM_CPUS 7
@@ -103,8 +104,7 @@
#define BE_ADAPTER_LINK_UP 0x001
#define BE_ADAPTER_LINK_DOWN 0x002
#define BE_ADAPTER_PCI_ERR 0x004
-#define BE_ADAPTER_STATE_SHUTDOWN 0x008
-#define BE_ADAPTER_CHECK_BOOT 0x010
+#define BE_ADAPTER_CHECK_BOOT 0x008
#define BEISCSI_CLEAN_UNLOAD 0x01
@@ -304,6 +304,7 @@ struct invalidate_command_table {
#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
(phwi_ctrlr->wrb_context[cri].ulp_num)
struct hwi_wrb_context {
+ spinlock_t wrb_lock;
struct list_head wrb_handle_list;
struct list_head wrb_handle_drvr_list;
struct wrb_handle **pwrb_handle_base;
@@ -398,7 +399,9 @@ struct beiscsi_hba {
* group together since they are used most frequently
* for cid to cri conversion
*/
+#define BEISCSI_PHYS_PORT_MAX 4
unsigned int phys_port;
+ /* valid values of phys_port id are 0, 1, 2, 3 */
unsigned int eqid_count;
unsigned int cqid_count;
unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
@@ -416,6 +419,7 @@ struct beiscsi_hba {
} fw_config;
unsigned int state;
+ u8 optic_state;
int get_boot;
bool fw_timeout;
bool ue_detected;
@@ -423,6 +427,8 @@ struct beiscsi_hba {
bool mac_addr_set;
u8 mac_address[ETH_ALEN];
+ u8 port_name;
+ u8 port_speed;
char fw_ver_str[BEISCSI_VER_STRLEN];
char wq_name[20];
struct workqueue_struct *wq; /* The actual work queue */
@@ -845,9 +851,10 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int id, unsigned int num_processed,
- unsigned char rearm, unsigned char event);
+ unsigned char rearm);
-unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq);
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget);
+void beiscsi_process_mcc_cq(struct beiscsi_hba *phba);
static inline bool beiscsi_error(struct beiscsi_hba *phba)
{
@@ -1074,12 +1081,14 @@ struct hwi_context_memory {
#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */
+#define __beiscsi_log(phba, level, fmt, arg...) \
+ shost_printk(level, phba->shost, fmt, __LINE__, ##arg)
+
#define beiscsi_log(phba, level, mask, fmt, arg...) \
do { \
uint32_t log_value = phba->attr_log_enable; \
if (((mask) & log_value) || (level[1] <= '3')) \
- shost_printk(level, phba->shost, \
- fmt, __LINE__, ##arg); \
-} while (0)
+ __beiscsi_log(phba, level, fmt, ##arg); \
+} while (0)
#endif
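
The beiscsi_log() rework keeps the classic do { ... } while (0) wrapper, which makes a multi-statement macro behave like a single statement; note that the macro definition itself must not end in a semicolon, or an if/else around a call site breaks. A standalone illustration:

/* Why multi-statement macros are wrapped in do { } while (0), and why
 * the definition must not carry its own trailing semicolon. */
#include <stdio.h>

#define LOG2(a, b) do { printf("%s\n", a); printf("%s\n", b); } while (0)

int main(void)
{
    int verbose = 0;

    if (verbose)
        LOG2("first", "second");   /* expands to one statement: OK  */
    else
        puts("quiet");             /* with a ';' inside the macro,  */
                                   /* this 'else' would not compile */
    return 0;
}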
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aea3e6b9477d..83926e221f1e 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -161,20 +161,17 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_req_modify_eq_delay *req;
- unsigned int tag = 0;
+ unsigned int tag;
int i;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
-
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
@@ -187,8 +184,8 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
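
This hunk shows the MCC submission pattern that repeats throughout be_mgmt.c below: take the mbox mutex, get the WRB and its tag together from alloc_mcc_wrb(), fill the request, then notify with the tag before dropping the mutex. In outline, condensed from this diff (error handling and request fields elided):

/* Condensed outline of the new MCC WRB pattern; names are from the diff. */
unsigned int tag;
struct be_mcc_wrb *wrb;

mutex_lock(&ctrl->mbox_lock);
wrb = alloc_mcc_wrb(phba, &tag);  /* replaces alloc_mcc_tag + wrb_from_mccq */
if (!wrb) {
	mutex_unlock(&ctrl->mbox_lock);
	return 0;                 /* 0 == no tag; callers treat as failure */
}
/* ... prepare WRB header and embedded payload ... */
be_mcc_notify(phba, tag);         /* tag now travels with the notify */
mutex_unlock(&ctrl->mbox_lock);
return tag;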
@@ -209,22 +206,20 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_reopen_session_req *req;
- unsigned int tag = 0;
+ unsigned int tag;
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_get_boot_target\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
@@ -234,8 +229,8 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
req->reopen_type = reopen_type;
req->session_handle = sess_handle;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -244,29 +239,27 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_get_boot_target_req *req;
- unsigned int tag = 0;
+ unsigned int tag;
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_get_boot_target\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
sizeof(struct be_cmd_get_boot_target_resp));
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -276,7 +269,7 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
- unsigned int tag = 0;
+ unsigned int tag;
struct be_cmd_get_session_req *req;
struct be_cmd_get_session_resp *resp;
struct be_sge *sge;
@@ -285,22 +278,17 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In beiscsi_get_session_info\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
nonemb_cmd->size = sizeof(*resp);
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
- wrb = wrb_from_mccq(phba);
sge = nonembedded_sgl(wrb);
- wrb->tag0 |= tag;
-
-
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
@@ -310,12 +298,54 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
/**
+ * mgmt_get_port_name()- Get port name for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the alphanumeric character for port
+ *
+ **/
+int mgmt_get_port_name(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba)
+{
+ int ret = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_port_name *ioctl;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
+ ioctl = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+ be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+ ret = be_mbox_notify(ctrl);
+ phba->port_name = 0;
+ if (!ret) {
+ phba->port_name = ioctl->p.resp.port_names >>
+ (phba->fw_config.phys_port * 8) & 0xff;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
+ ret, ioctl->h.resp_hdr.status);
+ }
+
+ if (phba->port_name == 0)
+ phba->port_name = '?';
+
+ mutex_unlock(&ctrl->mbox_lock);
+ return ret;
+}
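
mgmt_get_port_name() pulls one byte out of a packed 32-bit response, one port name per byte indexed by physical port. Because >> binds tighter than &, the expression needs no extra parentheses; a quick standalone check of the arithmetic:

/* Extracting a per-port byte from a packed word, as in
 * port_names >> (phys_port * 8) & 0xff above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t port_names = 0x44434241;       /* ports 0..3 -> 'A'..'D' */

    for (unsigned p = 0; p < 4; p++)
        printf("port %u -> %c\n", p, port_names >> (p * 8) & 0xff);
    return 0;
}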
+
+/**
* mgmt_get_fw_config()- Get the FW config for the function
* @ctrl: ptr to Ctrl Info
* @phba: ptr to the dev priv structure
@@ -331,91 +361,147 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_fw_cfg *req = embedded_payload(wrb);
- int status = 0;
+ struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
+ uint32_t cid_count, icd_count;
+ int status = -EINVAL;
+ uint8_t ulp_num = 0;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
EMBED_MBX_MAX_PAYLOAD_SIZE);
- status = be_mbox_notify(ctrl);
- if (!status) {
- uint8_t ulp_num = 0;
- struct be_fw_cfg *pfw_cfg;
- pfw_cfg = req;
- if (!is_chip_be2_be3r(phba)) {
- phba->fw_config.eqid_count = pfw_cfg->eqid_count;
- phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+ if (be_mbox_notify(ctrl)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in mgmt_get_fw_config\n");
+ goto fail_init;
+ }
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_INIT,
- "BG_%d : EQ_Count : %d CQ_Count : %d\n",
- phba->fw_config.eqid_count,
+ /* FW response formats depend on port id */
+ phba->fw_config.phys_port = pfw_cfg->phys_port;
+ if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid physical port id %d\n",
+ phba->fw_config.phys_port);
+ goto fail_init;
+ }
+
+ /* populate and check FW config against min and max values */
+ if (!is_chip_be2_be3r(phba)) {
+ phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+ phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+ if (phba->fw_config.eqid_count == 0 ||
+ phba->fw_config.eqid_count > 2048) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid EQ count %d\n",
+ phba->fw_config.eqid_count);
+ goto fail_init;
+ }
+ if (phba->fw_config.cqid_count == 0 ||
+ phba->fw_config.cqid_count > 4096) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid CQ count %d\n",
phba->fw_config.cqid_count);
+ goto fail_init;
}
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+ phba->fw_config.eqid_count,
+ phba->fw_config.cqid_count);
+ }
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
- if (pfw_cfg->ulp[ulp_num].ulp_mode &
- BEISCSI_ULP_ISCSI_INI_MODE)
- set_bit(ulp_num,
- &phba->fw_config.ulp_supported);
-
- phba->fw_config.phys_port = pfw_cfg->phys_port;
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
- if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
- phba->fw_config.iscsi_cid_start[ulp_num] =
- pfw_cfg->ulp[ulp_num].sq_base;
- phba->fw_config.iscsi_cid_count[ulp_num] =
- pfw_cfg->ulp[ulp_num].sq_count;
-
- phba->fw_config.iscsi_icd_start[ulp_num] =
- pfw_cfg->ulp[ulp_num].icd_base;
- phba->fw_config.iscsi_icd_count[ulp_num] =
- pfw_cfg->ulp[ulp_num].icd_count;
-
- phba->fw_config.iscsi_chain_start[ulp_num] =
- pfw_cfg->chain_icd[ulp_num].chain_base;
- phba->fw_config.iscsi_chain_count[ulp_num] =
- pfw_cfg->chain_icd[ulp_num].chain_count;
-
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : Function loaded on ULP : %d\n"
- "\tiscsi_cid_count : %d\n"
- "\tiscsi_cid_start : %d\n"
- "\t iscsi_icd_count : %d\n"
- "\t iscsi_icd_start : %d\n",
- ulp_num,
- phba->fw_config.
- iscsi_cid_count[ulp_num],
- phba->fw_config.
- iscsi_cid_start[ulp_num],
- phba->fw_config.
- iscsi_icd_count[ulp_num],
- phba->fw_config.
- iscsi_icd_start[ulp_num]);
- }
+ /**
+ * Check on which ULPs the iSCSI protocol is loaded and set the
+ * bit for each of them. This flag is checked throughout the code
+ * to determine which ULPs have the iSCSI protocol loaded.
+ **/
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (pfw_cfg->ulp[ulp_num].ulp_mode &
+ BEISCSI_ULP_ISCSI_INI_MODE) {
+ set_bit(ulp_num, &phba->fw_config.ulp_supported);
+
+ /* Get the CID, ICD and Chain count for each ULP */
+ phba->fw_config.iscsi_cid_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_base;
+ phba->fw_config.iscsi_cid_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_count;
+
+ phba->fw_config.iscsi_icd_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_base;
+ phba->fw_config.iscsi_icd_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_count;
+
+ phba->fw_config.iscsi_chain_start[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_base;
+ phba->fw_config.iscsi_chain_count[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_count;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Function loaded on ULP : %d\n"
+ "\tiscsi_cid_count : %d\n"
+ "\tiscsi_cid_start : %d\n"
+ "\t iscsi_icd_count : %d\n"
+ "\t iscsi_icd_start : %d\n",
+ ulp_num,
+ phba->fw_config.
+ iscsi_cid_count[ulp_num],
+ phba->fw_config.
+ iscsi_cid_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ phba->fw_config.
+ iscsi_icd_start[ulp_num]);
}
+ }
- phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
- BEISCSI_FUNC_DUA_MODE);
+ if (phba->fw_config.ulp_supported == 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
+ pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
+ pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
+ goto fail_init;
+ }
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : DUA Mode : 0x%x\n",
- phba->fw_config.dual_ulp_aware);
+ /**
+ * ICD is shared among ULPs. Use icd_count of any one loaded ULP
+ **/
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+ icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+ if (icd_count == 0 || icd_count > 65536) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d: invalid ICD count %d\n", icd_count);
+ goto fail_init;
+ }
- } else {
+ cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+ BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+ if (cid_count == 0 || cid_count > 4096) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : Failed in mgmt_get_fw_config\n");
- status = -EINVAL;
+ "BG_%d: invalid CID count %d\n", cid_count);
+ goto fail_init;
}
- spin_unlock(&ctrl->mbox_lock);
+ /**
+ * Check whether FW is dual-ULP aware, i.e. can handle
+ * either of the protocols.
+ */
+ phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+ BEISCSI_FUNC_DUA_MODE);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : DUA Mode : 0x%x\n",
+ phba->fw_config.dual_ulp_aware);
+
+ /* all set, continue using this FW config */
+ status = 0;
+fail_init:
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
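
mgmt_get_fw_config() now rejects out-of-range values before the rest of init consumes them, converging on a single fail_init unlock path. A small range-check helper in the same spirit; this helper is hypothetical, not the driver's, and only mirrors the limits visible in the diff:

/* Hypothetical helper mirroring the count checks above: valid means
 * non-zero and no greater than the stated maximum. */
#include <stdbool.h>
#include <stdint.h>

static bool count_in_range(uint32_t count, uint32_t max)
{
    return count != 0 && count <= max;
}

/* Usage, matching the limits in the diff:
 *   if (!count_in_range(eqid_count, 2048))  goto fail_init;
 *   if (!count_in_range(cqid_count, 4096))  goto fail_init;
 *   if (!count_in_range(icd_count, 65536))  goto fail_init;
 */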
@@ -440,7 +526,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
req = nonemb_cmd.va;
memset(req, 0, sizeof(*req));
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
@@ -470,7 +556,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
} else
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_check_supported_fw\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -501,8 +587,9 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
req->region = region;
req->sector = sector;
req->offset = offset;
- spin_lock(&ctrl->mbox_lock);
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
case BEISCSI_WRITE_FLASH:
offset = sector * sector_size + offset;
@@ -521,28 +608,26 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
"BG_%d : Unsupported cmd = 0x%x\n\n",
bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return -ENOSYS;
}
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
mcc_sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
job->request_payload.sg_cnt);
mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
- wrb->tag0 |= tag;
- be_mcc_notify(phba);
+ be_mcc_notify(phba, tag);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -558,12 +643,19 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct iscsi_cleanup_req *req = embedded_payload(wrb);
- int status = 0;
+ struct be_mcc_wrb *wrb;
+ struct iscsi_cleanup_req *req;
+ unsigned int tag;
+ int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return -EBUSY;
+ }
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
@@ -572,11 +664,12 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
- status = be_mcc_notify_wait(phba);
+ be_mcc_notify(phba, tag);
+ status = be_mcc_compl_poll(phba, tag);
if (status)
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
"BG_%d : mgmt_epfw_cleanup , FAILED\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -590,20 +683,18 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct be_mcc_wrb *wrb;
struct be_sge *sge;
struct invalidate_commands_params_in *req;
- unsigned int i, tag = 0;
+ unsigned int i, tag;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
- wrb = wrb_from_mccq(phba);
sge = nonembedded_sgl(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -621,8 +712,8 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -637,16 +728,14 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct iscsi_invalidate_connection_params_in *req;
unsigned int tag = 0;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
- wrb->tag0 |= tag;
- req = embedded_payload(wrb);
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
@@ -658,8 +747,8 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
else
req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
req->save_cfg = savecfg_flag;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -669,25 +758,23 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct tcp_upload_params_in *req;
- unsigned int tag = 0;
+ unsigned int tag;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
- req = embedded_payload(wrb);
- wrb->tag0 |= tag;
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
req->id = (unsigned short)cid;
req->upload_type = (unsigned char)upload_flag;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -722,6 +809,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
unsigned short cid = beiscsi_ep->ep_cid;
struct be_sge *sge;
+ if (dst_addr->sa_family != PF_INET && dst_addr->sa_family != PF_INET6) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BG_%d : unknown addr family %d\n",
+ dst_addr->sa_family);
+ return -EINVAL;
+ }
+
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -732,18 +826,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
- sge = nonembedded_sgl(wrb);
+ sge = nonembedded_sgl(wrb);
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -760,7 +853,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
beiscsi_ep->ip_type = BE2_IPV4;
- } else if (dst_addr->sa_family == PF_INET6) {
+ } else {
+ /* else it's the PF_INET6 family */
req->ip_address.ip_type = BE2_IPV6;
memcpy(&req->ip_address.addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
@@ -769,14 +863,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
memcpy(&beiscsi_ep->dst6_addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
beiscsi_ep->ip_type = BE2_IPV6;
- } else{
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BG_%d : unknown addr family %d\n",
- dst_addr->sa_family);
- spin_unlock(&ctrl->mbox_lock);
- free_mcc_tag(&phba->ctrl, tag);
- return -EINVAL;
-
}
req->cid = cid;
i = phba->nxt_cqid++;
@@ -801,35 +887,45 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
req->tcp_window_scale_count = 2;
}
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb);
- struct be_cmd_get_all_if_id_req *pbe_allid = req;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_all_if_id_req *req;
+ struct be_cmd_get_all_if_id_req *pbe_allid;
+ unsigned int tag;
int status = 0;
- memset(wrb, 0, sizeof(*wrb));
-
- spin_lock(&ctrl->mbox_lock);
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return -EINTR;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return -ENOMEM;
+ }
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
sizeof(*req));
- status = be_mbox_notify(ctrl);
- if (!status)
- phba->interface_handle = pbe_allid->if_hndl_list[0];
- else {
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+
+ status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
+ if (status) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed in mgmt_get_all_if_id\n");
+ return -EBUSY;
}
- spin_unlock(&ctrl->mbox_lock);
+
+ pbe_allid = embedded_payload(wrb);
+ phba->interface_handle = pbe_allid->if_hndl_list[0];
return status;
}
@@ -852,27 +948,24 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
unsigned int tag;
int rc = 0;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
rc = -ENOMEM;
goto free_cmd;
}
- wrb = wrb_from_mccq(phba);
- wrb->tag0 |= tag;
sge = nonembedded_sgl(wrb);
-
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
sge->len = cpu_to_le32(nonemb_cmd->size);
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
- rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd);
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
@@ -1003,8 +1096,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
uint32_t ip_type;
int rc;
- if (mgmt_get_all_if_id(phba))
- return -EIO;
+ rc = mgmt_get_all_if_id(phba);
+ if (rc)
+ return rc;
ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
BE2_IPV6 : BE2_IPV4 ;
@@ -1173,8 +1267,9 @@ int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
int rc;
- if (mgmt_get_all_if_id(phba))
- return -EIO;
+ rc = mgmt_get_all_if_id(phba);
+ if (rc)
+ return rc;
do {
rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
@@ -1245,55 +1340,27 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
{
- unsigned int tag = 0;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_hba_name *req;
struct be_ctrl_info *ctrl = &phba->ctrl;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
sizeof(*req));
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
-unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
-{
- unsigned int tag = 0;
- struct be_mcc_wrb *wrb;
- struct be_cmd_ntwk_link_status_req *req;
- struct be_ctrl_info *ctrl = &phba->ctrl;
-
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
- }
-
- wrb = wrb_from_mccq(phba);
- req = embedded_payload(wrb);
- wrb->tag0 |= tag;
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
- sizeof(*req));
-
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -1330,7 +1397,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
return -EAGAIN;
}
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -1364,7 +1431,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
return -EAGAIN;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -1406,7 +1473,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
return -EBUSY;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
@@ -1749,19 +1816,17 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_logout_fwboot_sess\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : MBX Tag Failure\n");
return -EINVAL;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
@@ -1769,10 +1834,10 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
/* Set the session handle */
req->session_handle = fw_sess_handle;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index c1dbb690ee27..f3a48a04b2ca 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -268,6 +268,8 @@ struct beiscsi_endpoint {
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
+int mgmt_get_port_name(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba);
unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 2ea0db4b62a7..7209afad82f7 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -91,6 +91,25 @@ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
+void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
+{
+ int tail = trcm->tail;
+ struct bfa_trc_s *trc = &trcm->trc[tail];
+
+ if (trcm->stopped)
+ return;
+
+ trc->fileno = (u16) fileno;
+ trc->line = (u16) line;
+ trc->data.u64 = data;
+ trc->timestamp = BFA_TRC_TS(trcm);
+
+ trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
+ if (trcm->tail == trcm->head)
+ trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
+}
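
__bfa_trc() advances through a circular trace buffer with a mask, which requires BFA_TRC_MAX to be a power of two; when the tail catches the head, the oldest record is overwritten. The index arithmetic in isolation:

/* Power-of-two ring indexing as in __bfa_trc(): (i + 1) & (N - 1)
 * equals (i + 1) % N when N is a power of two; when tail catches
 * head, head advances too, dropping the oldest entry. */
#include <stdio.h>

#define RING_MAX 8                      /* must be a power of two */

struct ring { int head, tail; };

static void ring_push(struct ring *r)
{
    r->tail = (r->tail + 1) & (RING_MAX - 1);
    if (r->tail == r->head)             /* full: overwrite oldest */
        r->head = (r->head + 1) & (RING_MAX - 1);
}

int main(void)
{
    struct ring r = { 0, 0 };

    for (int i = 0; i < 10; i++)        /* wraps after 8 pushes */
        ring_push(&r);
    printf("head=%d tail=%d\n", r.head, r.tail); /* head=3 tail=2 */
    return 0;
}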
+
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index da9cf655be26..df6760ca0911 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -108,44 +108,11 @@ bfa_trc_stop(struct bfa_trc_mod_s *trcm)
trcm->stopped = 1;
}
-static inline void
-__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
-{
- int tail = trcm->tail;
- struct bfa_trc_s *trc = &trcm->trc[tail];
-
- if (trcm->stopped)
- return;
-
- trc->fileno = (u16) fileno;
- trc->line = (u16) line;
- trc->data.u64 = data;
- trc->timestamp = BFA_TRC_TS(trcm);
-
- trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
- if (trcm->tail == trcm->head)
- trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-}
-
+void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data);
-static inline void
-__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
-{
- int tail = trcm->tail;
- struct bfa_trc_s *trc = &trcm->trc[tail];
-
- if (trcm->stopped)
- return;
-
- trc->fileno = (u16) fileno;
- trc->line = (u16) line;
- trc->data.u32.u32 = data;
- trc->timestamp = BFA_TRC_TS(trcm);
-
- trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
- if (trcm->tail == trcm->head)
- trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-}
+void
+__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data);
#define bfa_sm_fault(__mod, __event) do { \
bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 251e2ff8ff5f..a1ada4a31c97 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -2803,7 +2803,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
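
The bfa_ioc_get_adapter_manufacturer() change swaps memcpy() for strncpy(): the manufacturer string is shorter than BFA_ADAPTER_MFG_NAME_LEN, so copying the full field length with memcpy() reads past the end of the source literal. A standalone illustration; the field width here is illustrative:

/* memcpy() of a fixed field length over-reads a shorter string
 * literal; strncpy() stops at the NUL and the memset zero-fills. */
#include <stdio.h>
#include <string.h>

#define NAME_LEN 16

int main(void)
{
    char field[NAME_LEN];
    const char *mfg = "QLogic";          /* 7 bytes including NUL */

    /* memcpy(field, mfg, NAME_LEN);     -- would read 9 bytes past mfg */
    memset(field, 0, NAME_LEN);
    strncpy(field, mfg, NAME_LEN);       /* copies 7, rest stays zero  */
    printf("%s\n", field);
    return 0;
}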
void
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 67405c628864..d7029ea5d319 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -97,6 +97,15 @@ static void __exit bnx2fc_mod_exit(void);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging,
+ "Option to enable extended logging,\n"
+ "\t\tDefault is 0 - no logging.\n"
+ "\t\t0x01 - SCSI cmd error, cleanup.\n"
+ "\t\t0x02 - Session setup, cleanup, etc.\n"
+ "\t\t0x04 - lport events, link, mtu, etc.\n"
+ "\t\t0x08 - ELS logs.\n"
+ "\t\t0x10 - fcoe L2 fame related logs.\n"
+ "\t\t0xff - LOG all messages.");
static int bnx2fc_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0002caf687dd..2230dab67ca5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1104,8 +1104,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct bnx2fc_cmd *io_req;
struct fc_lport *lport;
struct bnx2fc_rport *tgt;
- int rc = FAILED;
-
+ int rc;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
@@ -1114,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
lport = shost_priv(sc_cmd->device->host);
if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
printk(KERN_ERR PFX "eh_abort: link not ready\n");
- return rc;
+ return FAILED;
}
tgt = (struct bnx2fc_rport *)&rp[1];
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 804806e1cbb4..339f6b7f4803 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
@@ -158,7 +159,6 @@ static struct scsi_transport_template *cxgb4i_stt;
* open/close/abort and data send/receive.
*/
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK 0x3FFU
#define MAX_IMM_TX_PKT_LEN 256
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 5ada9268a450..6e6815545a71 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops;
sectors
*/
-#define NUM_RRQ_ENTRY 16 /* for master issued cmds */
#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
/* AFU command retry limit */
@@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops;
index derivation
*/
-#define CXLFLASH_MAX_CMDS 16
+#define CXLFLASH_MAX_CMDS 256
#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
+/* RRQ for master issued cmds */
+#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
+
static inline void check_sizes(void)
{
@@ -106,7 +108,6 @@ struct cxlflash_cfg {
atomic_t scan_host_needed;
struct cxl_afu *cxl_afu;
- struct pci_dev *parent_dev;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@@ -149,7 +150,7 @@ struct afu_cmd {
struct afu {
/* Stuff requiring alignment go first. */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */
+ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
/*
* Command & data for AFU commands.
*/
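
Bumping CXLFLASH_MAX_CMDS from 16 to 256 grows the RRQ from 128 bytes to 2 KB (256 entries of 8 bytes each), as the updated comment notes. A compile-time check of that arithmetic, under the assumption that an entry stays a 64-bit word:

/* Compile-time check of the RRQ size comment above. */
#include <stdint.h>

#define CXLFLASH_MAX_CMDS 256
#define NUM_RRQ_ENTRY     CXLFLASH_MAX_CMDS

_Static_assert(NUM_RRQ_ENTRY * sizeof(uint64_t) == 2048,
               "RRQ is expected to be 2K");

int main(void) { return 0; }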
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index f6d90ce8f3b7..35968bdb4866 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -726,11 +726,11 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
*/
static void term_afu(struct cxlflash_cfg *cfg)
{
- term_mc(cfg, UNDO_START);
-
if (cfg->afu)
stop_afu(cfg);
+ term_mc(cfg, UNDO_START);
+
pr_debug("%s: returning\n", __func__);
}
@@ -767,7 +767,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
cancel_work_sync(&cfg->work_q);
term_afu(cfg);
case INIT_STATE_PCI:
- pci_release_regions(cfg->dev);
pci_disable_device(pdev);
case INIT_STATE_NONE:
free_mem(cfg);
@@ -840,15 +839,6 @@ static int init_pci(struct cxlflash_cfg *cfg)
struct pci_dev *pdev = cfg->dev;
int rc = 0;
- cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
- rc = pci_request_regions(pdev, CXLFLASH_NAME);
- if (rc < 0) {
- dev_err(&pdev->dev,
- "%s: Couldn't register memory range of registers\n",
- __func__);
- goto out;
- }
-
rc = pci_enable_device(pdev);
if (rc || pci_channel_offline(pdev)) {
if (pci_channel_offline(pdev)) {
@@ -860,55 +850,13 @@ static int init_pci(struct cxlflash_cfg *cfg)
dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
__func__);
cxlflash_wait_for_pci_err_recovery(cfg);
- goto out_release_regions;
- }
- }
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc < 0) {
- dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
- __func__);
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- if (rc < 0) {
- dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
- __func__);
- goto out_disable;
- }
-
- pci_set_master(pdev);
-
- if (pci_channel_offline(pdev)) {
- cxlflash_wait_for_pci_err_recovery(cfg);
- if (pci_channel_offline(pdev)) {
- rc = -EIO;
- goto out_msi_disable;
+ goto out;
}
}
- rc = pci_save_state(pdev);
-
- if (rc != PCIBIOS_SUCCESSFUL) {
- dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
- __func__);
- rc = -EIO;
- goto cleanup_nolog;
- }
-
out:
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
-
-cleanup_nolog:
-out_msi_disable:
- cxlflash_wait_for_pci_err_recovery(cfg);
-out_disable:
- pci_disable_device(pdev);
-out_release_regions:
- pci_release_regions(pdev);
- goto out;
-
}
/**
@@ -1407,7 +1355,7 @@ static int start_context(struct cxlflash_cfg *cfg)
*/
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
- struct pci_dev *dev = cfg->parent_dev;
+ struct pci_dev *dev = cfg->dev;
int rc = 0;
int ro_start, ro_size, i, j, k;
ssize_t vpd_size;
@@ -1416,7 +1364,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
/* Get the VPD data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+ vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@@ -2149,6 +2097,16 @@ static ssize_t lun_mode_store(struct device *dev,
rc = kstrtouint(buf, 10, &lun_mode);
if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
afu->internal_lun = lun_mode;
+
+ /*
+ * When configured for internal LUN, there is only one channel,
+ * channel number 0, else there will be 2 (default).
+ */
+ if (afu->internal_lun)
+ shost->max_channel = 0;
+ else
+ shost->max_channel = NUM_FC_PORTS - 1;
+
afu_reset(cfg);
scsi_scan_host(cfg->host);
}
@@ -2295,7 +2253,7 @@ static struct scsi_host_template driver_template = {
.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
.change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = 16,
+ .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
.this_id = -1,
.sg_tablesize = SG_NONE, /* No scatter gather support */
@@ -2392,7 +2350,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
{
struct Scsi_Host *host;
struct cxlflash_cfg *cfg = NULL;
- struct device *phys_dev;
struct dev_dependent_vals *ddv;
int rc = 0;
@@ -2458,19 +2415,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- /*
- * Use the special service provided to look up the physical
- * PCI device, since we are called on the probe of the virtual
- * PCI host bus (vphb)
- */
- phys_dev = cxl_get_phys_dev(pdev);
- if (!dev_is_pci(phys_dev)) {
- dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
- rc = -ENODEV;
- goto out_remove;
- }
- cfg->parent_dev = to_pci_dev(phys_dev);
-
cfg->cxl_afu = cxl_pci_to_afu(pdev);
rc = init_pci(cfg);
@@ -2544,8 +2488,8 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
if (unlikely(rc))
dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
__func__, rc);
- term_mc(cfg, UNDO_START);
stop_afu(cfg);
+ term_mc(cfg, UNDO_START);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
cfg->state = STATE_FAILTERM;
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index f4020dbb55c3..d8a5cb3cd2bd 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -709,27 +709,32 @@ int cxlflash_disk_release(struct scsi_device *sdev,
* @cfg: Internal structure associated with the host.
* @ctxi: Context to release.
*
- * Note that the rht_lun member of the context was cut from a single
- * allocation when the context was created and therefore does not need
- * to be explicitly freed. Also note that we conditionally check for the
- * existence of the context control map before clearing the RHT registers
- * and context capabilities because it is possible to destroy a context
- * while the context is in the error state (previous mapping was removed
- * [so we don't have to worry about clearing] and context is waiting for
- * a new mapping).
+ * This routine is safe to be called with a non-initialized context
+ * and is tolerant of being called with the context's mutex held (it
+ * will be unlocked if necessary before freeing). Also note that the
+ * routine conditionally checks for the existence of the context control
+ * map before clearing the RHT registers and context capabilities because
+ * it is possible to destroy a context while the context is in the error
+ * state (previous mapping was removed [so there is no need to worry about
+ * clearing] and context is waiting for a new mapping).
*/
static void destroy_context(struct cxlflash_cfg *cfg,
struct ctx_info *ctxi)
{
struct afu *afu = cfg->afu;
- WARN_ON(!list_empty(&ctxi->luns));
+ if (ctxi->initialized) {
+ WARN_ON(!list_empty(&ctxi->luns));
- /* Clear RHT registers and drop all capabilities for this context */
- if (afu->afu_map && ctxi->ctrl_map) {
- writeq_be(0, &ctxi->ctrl_map->rht_start);
- writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
- writeq_be(0, &ctxi->ctrl_map->ctx_cap);
+ /* Clear RHT registers and drop all capabilities for context */
+ if (afu->afu_map && ctxi->ctrl_map) {
+ writeq_be(0, &ctxi->ctrl_map->rht_start);
+ writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
+ writeq_be(0, &ctxi->ctrl_map->ctx_cap);
+ }
+
+ if (mutex_is_locked(&ctxi->mutex))
+ mutex_unlock(&ctxi->mutex);
}
/* Free memory associated with context */
@@ -742,23 +747,12 @@ static void destroy_context(struct cxlflash_cfg *cfg,
/**
* create_context() - allocates and initializes a context
* @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained CXL context reference.
- * @ctxid: Previously obtained process element associated with CXL context.
- * @adap_fd: Previously obtained adapter fd associated with CXL context.
- * @file: Previously obtained file associated with CXL context.
- * @perms: User-specified permissions.
- *
- * The context's mutex is locked when an allocated context is returned.
*
* Return: Allocated context on success, NULL on failure
*/
-static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid,
- int adap_fd, struct file *file,
- u32 perms)
+static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
struct ctx_info *ctxi = NULL;
struct llun_info **lli = NULL;
u8 *ws = NULL;
@@ -781,28 +775,49 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
ctxi->rht_lun = lli;
ctxi->rht_needs_ws = ws;
ctxi->rht_start = rhte;
- ctxi->rht_perms = perms;
+out:
+ return ctxi;
+
+err:
+ kfree(ws);
+ kfree(lli);
+ kfree(ctxi);
+ ctxi = NULL;
+ goto out;
+}
+
+/**
+ * init_context() - initializes a previously allocated context
+ * @ctxi: Previously allocated context
+ * @cfg: Internal structure associated with the host.
+ * @ctx: Previously obtained CXL context reference.
+ * @ctxid: Previously obtained process element associated with CXL context.
+ * @adap_fd: Previously obtained adapter fd associated with CXL context.
+ * @file: Previously obtained file associated with CXL context.
+ * @perms: User-specified permissions.
+ *
+ * Upon return, the context is marked as initialized and the context's mutex
+ * is locked.
+ */
+static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
+ struct cxl_context *ctx, int ctxid, int adap_fd,
+ struct file *file, u32 perms)
+{
+ struct afu *afu = cfg->afu;
+ ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
ctxi->lfd = adap_fd;
ctxi->pid = current->tgid; /* tgid = pid */
ctxi->ctx = ctx;
ctxi->file = file;
+ ctxi->initialized = true;
mutex_init(&ctxi->mutex);
INIT_LIST_HEAD(&ctxi->luns);
INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
mutex_lock(&ctxi->mutex);
-out:
- return ctxi;
-
-err:
- kfree(ws);
- kfree(lli);
- kfree(ctxi);
- ctxi = NULL;
- goto out;
}
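
The create/init split above is an instance of two-phase construction: everything
that can fail happens first, and the infallible initialization step marks the
object so teardown knows how far construction got. A minimal sketch with
hypothetical names (struct widget is illustrative, not a driver type):

	struct widget {
		bool initialized;
		struct mutex mutex;
	};

	/* Phase 1: only the steps that can fail; nothing to undo on error. */
	static struct widget *widget_create(void)
	{
		return kzalloc(sizeof(struct widget), GFP_KERNEL);
	}

	/* Phase 2: cannot fail; marks the object and returns it locked. */
	static void widget_init(struct widget *w)
	{
		mutex_init(&w->mutex);
		w->initialized = true;	/* teardown keys off this flag */
		mutex_lock(&w->mutex);
	}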
/**
@@ -1300,9 +1315,9 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
u32 perms;
int ctxid = -1;
u64 rctxid = 0UL;
- struct file *file;
+ struct file *file = NULL;
- struct cxl_context *ctx;
+ struct cxl_context *ctx = NULL;
int fd = -1;
@@ -1356,7 +1371,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
if (unlikely(!lun_access)) {
dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
rc = -ENOMEM;
- goto err0;
+ goto err;
}
lun_access->lli = lli;
@@ -1371,53 +1386,56 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto out_attach;
}
+ ctxi = create_context(cfg);
+ if (unlikely(!ctxi)) {
+ dev_err(dev, "%s: Failed to create context! (%d)\n",
+ __func__, ctxid);
+ goto err;
+ }
+
ctx = cxl_dev_context_init(cfg->dev);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
rc = -ENODEV;
- goto err1;
+ goto err;
+ }
+
+ work = &ctxi->work;
+ work->num_interrupts = attach->num_interrupts;
+ work->flags = CXL_START_WORK_NUM_IRQS;
+
+ rc = cxl_start_work(ctx, work);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: Could not start context rc=%d\n",
+ __func__, rc);
+ goto err;
}
ctxid = cxl_process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
- goto err2;
+ goto err;
}
file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err2;
+ goto err;
}
/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
- ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Failed to create context! (%d)\n",
- __func__, ctxid);
- goto err3;
- }
-
- work = &ctxi->work;
- work->num_interrupts = attach->num_interrupts;
- work->flags = CXL_START_WORK_NUM_IRQS;
-
- rc = cxl_start_work(ctx, work);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err4;
- }
+ /* Context mutex is locked upon return */
+ init_context(ctxi, cfg, ctx, ctxid, fd, file, perms);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err5;
+ goto err;
}
/*
@@ -1453,13 +1471,14 @@ out:
__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
return rc;
-err5:
- cxl_stop_context(ctx);
-err4:
- put_context(ctxi);
- destroy_context(cfg, ctxi);
- ctxi = NULL;
-err3:
+err:
+ /* Cleanup CXL context; okay to 'stop' even if it was not started */
+ if (!IS_ERR_OR_NULL(ctx)) {
+ cxl_stop_context(ctx);
+ cxl_release_context(ctx);
+ ctx = NULL;
+ }
+
/*
* Here, we're overriding the fops with a dummy all-NULL fops because
* fput() calls the release fop, which will cause us to mistakenly
@@ -1467,15 +1486,21 @@ err3:
* to that routine (cxlflash_cxl_release) we should try to fix the
* issue here.
*/
- file->f_op = &null_fops;
- fput(file);
- put_unused_fd(fd);
- fd = -1;
-err2:
- cxl_release_context(ctx);
-err1:
+ if (fd > 0) {
+ file->f_op = &null_fops;
+ fput(file);
+ put_unused_fd(fd);
+ fd = -1;
+ file = NULL;
+ }
+
+ /* Cleanup our context; safe to call even with mutex locked */
+ if (ctxi) {
+ destroy_context(cfg, ctxi);
+ ctxi = NULL;
+ }
+
kfree(lun_access);
-err0:
scsi_device_put(sdev);
goto out;
}
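
The rewritten error path above replaces the numbered-label ladder (err0..err5)
with a single err: label whose steps are individually guarded by sentinel
values (ctx/file/ctxi reset to NULL, fd to -1). The idiom in miniature, with
hypothetical resources:

	static int attach_example(void)
	{
		void *a = NULL, *b = NULL;
		int rc;

		a = kzalloc(32, GFP_KERNEL);
		if (!a) {
			rc = -ENOMEM;
			goto err;
		}
		b = kzalloc(32, GFP_KERNEL);
		if (!b) {
			rc = -ENOMEM;
			goto err;
		}
		return 0;
	err:
		kfree(b);	/* kfree(NULL) is a no-op, so one label suffices */
		kfree(a);
		return rc;
	}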
@@ -1507,24 +1532,24 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
goto out;
}
+ rc = cxl_start_work(ctx, &ctxi->work);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: Could not start context rc=%d\n",
+ __func__, rc);
+ goto err1;
+ }
+
ctxid = cxl_process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
- goto err1;
+ goto err2;
}
file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err1;
- }
-
- rc = cxl_start_work(ctx, &ctxi->work);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
goto err2;
}
@@ -1569,10 +1594,10 @@ out:
return rc;
err3:
- cxl_stop_context(ctx);
-err2:
fput(file);
put_unused_fd(fd);
+err2:
+ cxl_stop_context(ctx);
err1:
cxl_release_context(ctx);
goto out;
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index bede574bcd77..5f9a091fda95 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -102,6 +102,7 @@ struct ctx_info {
u64 ctxid;
int lfd;
pid_t pid;
+ bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index e5647d59224f..0b331c9c0a8f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -13,13 +13,13 @@ menuconfig SCSI_DH
config SCSI_DH_RDAC
tristate "LSI RDAC Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have an LSI RDAC, select y. Otherwise, say N.
config SCSI_DH_HP_SW
tristate "HP/COMPAQ MSA Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have an HP/COMPAQ MSA device that requires START_STOP to
be sent to start it and cannot upgrade the firmware then select y.
@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
config SCSI_DH_EMC
tristate "EMC CLARiiON Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have an EMC CLARiiON, select y. Otherwise, say N.
config SCSI_DH_ALUA
tristate "SPC-3 ALUA Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
Access (ALUA).
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 5a328bf81836..5bcdf8dd6fb0 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -24,20 +24,13 @@
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#define ALUA_DH_NAME "alua"
-#define ALUA_DH_VER "1.3"
-
-#define TPGS_STATE_OPTIMIZED 0x0
-#define TPGS_STATE_NONOPTIMIZED 0x1
-#define TPGS_STATE_STANDBY 0x2
-#define TPGS_STATE_UNAVAILABLE 0x3
-#define TPGS_STATE_LBA_DEPENDENT 0x4
-#define TPGS_STATE_OFFLINE 0xe
-#define TPGS_STATE_TRANSITIONING 0xf
+#define ALUA_DH_VER "2.0"
#define TPGS_SUPPORT_NONE 0x00
#define TPGS_SUPPORT_OPTIMIZED 0x01
@@ -56,27 +49,62 @@
#define TPGS_MODE_IMPLICIT 0x1
#define TPGS_MODE_EXPLICIT 0x2
-#define ALUA_INQUIRY_SIZE 36
+#define ALUA_RTPG_SIZE 128
#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
+#define ALUA_RTPG_DELAY_MSECS 5
/* device handler flags */
-#define ALUA_OPTIMIZE_STPG 1
-#define ALUA_RTPG_EXT_HDR_UNSUPP 2
+#define ALUA_OPTIMIZE_STPG 0x01
+#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02
+#define ALUA_SYNC_STPG 0x04
+/* State machine flags */
+#define ALUA_PG_RUN_RTPG 0x10
+#define ALUA_PG_RUN_STPG 0x20
+#define ALUA_PG_RUNNING 0x40
-struct alua_dh_data {
+static uint optimize_stpg;
+module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
+
+static LIST_HEAD(port_group_list);
+static DEFINE_SPINLOCK(port_group_lock);
+static struct workqueue_struct *kaluad_wq;
+static struct workqueue_struct *kaluad_sync_wq;
+
+struct alua_port_group {
+ struct kref kref;
+ struct rcu_head rcu;
+ struct list_head node;
+ struct list_head dh_list;
+ unsigned char device_id_str[256];
+ int device_id_len;
int group_id;
- int rel_port;
int tpgs;
int state;
int pref;
unsigned flags; /* used for optimizing STPG */
- unsigned char inq[ALUA_INQUIRY_SIZE];
- unsigned char *buff;
- int bufflen;
unsigned char transition_tmo;
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ unsigned long expiry;
+ unsigned long interval;
+ struct delayed_work rtpg_work;
+ spinlock_t lock;
+ struct list_head rtpg_list;
+ struct scsi_device *rtpg_sdev;
+};
+
+struct alua_dh_data {
+ struct list_head node;
+ struct alua_port_group *pg;
+ int group_id;
+ spinlock_t pg_lock;
struct scsi_device *sdev;
+ int init_error;
+ struct mutex init_mutex;
+};
+
+struct alua_queue_data {
+ struct list_head entry;
activate_complete callback_fn;
void *callback_data;
};
@@ -84,179 +112,160 @@ struct alua_dh_data {
#define ALUA_POLICY_SWITCH_CURRENT 0
#define ALUA_POLICY_SWITCH_ALL 1
-static char print_alua_state(int);
+static void alua_rtpg_work(struct work_struct *work);
+static void alua_rtpg_queue(struct alua_port_group *pg,
+ struct scsi_device *sdev,
+ struct alua_queue_data *qdata, bool force);
+static void alua_check(struct scsi_device *sdev, bool force);
-static int realloc_buffer(struct alua_dh_data *h, unsigned len)
+static void release_port_group(struct kref *kref)
{
- if (h->buff && h->buff != h->inq)
- kfree(h->buff);
-
- h->buff = kmalloc(len, GFP_NOIO);
- if (!h->buff) {
- h->buff = h->inq;
- h->bufflen = ALUA_INQUIRY_SIZE;
- return 1;
- }
- h->bufflen = len;
- return 0;
-}
-
-static struct request *get_alua_req(struct scsi_device *sdev,
- void *buffer, unsigned buflen, int rw)
-{
- struct request *rq;
- struct request_queue *q = sdev->request_queue;
-
- rq = blk_get_request(q, rw, GFP_NOIO);
-
- if (IS_ERR(rq)) {
- sdev_printk(KERN_INFO, sdev,
- "%s: blk_get_request failed\n", __func__);
- return NULL;
- }
- blk_rq_set_block_pc(rq);
-
- if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
- blk_put_request(rq);
- sdev_printk(KERN_INFO, sdev,
- "%s: blk_rq_map_kern failed\n", __func__);
- return NULL;
- }
-
- rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- rq->retries = ALUA_FAILOVER_RETRIES;
- rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;
-
- return rq;
+ struct alua_port_group *pg;
+
+ pg = container_of(kref, struct alua_port_group, kref);
+ if (pg->rtpg_sdev)
+ flush_delayed_work(&pg->rtpg_work);
+ spin_lock(&port_group_lock);
+ list_del(&pg->node);
+ spin_unlock(&port_group_lock);
+ kfree_rcu(pg, rcu);
}
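
release_port_group() above encodes the lifetime rules for the new shared port
group objects: lookups may only take references via kref_get_unless_zero(), so
a group whose count has reached zero cannot be revived, and the final free is
deferred past in-flight RCU readers. Reduced to a sketch with hypothetical
names:

	struct obj {
		struct kref kref;
		struct rcu_head rcu;
		struct list_head node;
	};

	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, kref);

		list_del(&o->node);	/* list lock serializes against lookups */
		kfree_rcu(o, rcu);	/* readers under rcu_read_lock() stay safe */
	}

	/*
	 * Lookup side: a zero refcount means the object is already dying,
	 * so the lookup fails rather than resurrecting it:
	 *
	 *	if (kref_get_unless_zero(&o->kref)) {
	 *		...use o...
	 *		kref_put(&o->kref, obj_release);
	 *	}
	 */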
/*
* submit_rtpg - Issue a REPORT TARGET GROUP STATES command
* @sdev: sdev the command should be sent to
*/
-static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
+ int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
- struct request *rq;
- int err = 0;
-
- rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
- if (!rq) {
- err = DRIVER_BUSY << 24;
- goto done;
- }
+ u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)];
+ int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
/* Prepare the command. */
- rq->cmd[0] = MAINTENANCE_IN;
- if (!(h->flags & ALUA_RTPG_EXT_HDR_UNSUPP))
- rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
+ memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN));
+ cdb[0] = MAINTENANCE_IN;
+ if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
+ cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
else
- rq->cmd[1] = MI_REPORT_TARGET_PGS;
- put_unaligned_be32(h->bufflen, &rq->cmd[6]);
- rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
-
- blk_execute_rq(rq->q, NULL, rq, 1);
- if (rq->errors)
- err = rq->errors;
- blk_put_request(rq);
-done:
- return err;
+ cdb[1] = MI_REPORT_TARGET_PGS;
+ put_unaligned_be32(bufflen, &cdb[6]);
+
+ return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
+ buff, bufflen, sshdr,
+ ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, NULL, req_flags);
}
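
For reference, the RTPG CDB built above is the 12-byte MAINTENANCE IN command
with the allocation length stored big-endian in bytes 6..9, which is the byte
placement put_unaligned_be32() performs. The same shape as a standalone helper,
assuming <asm/unaligned.h> and the SCSI opcode headers:

	static void build_rtpg_cdb(u8 *cdb, u32 bufflen)
	{
		memset(cdb, 0, COMMAND_SIZE(MAINTENANCE_IN));
		cdb[0] = MAINTENANCE_IN;
		cdb[1] = MI_REPORT_TARGET_PGS;		/* or with MI_EXT_HDR_PARAM_FMT */
		put_unaligned_be32(bufflen, &cdb[6]);	/* bytes 6..9: allocation length */
	}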
/*
- * stpg_endio - Evaluate SET TARGET GROUP STATES
- * @sdev: the device to be evaluated
- * @state: the new target group state
+ * submit_stpg - Issue a SET TARGET PORT GROUP command
*
- * Evaluate a SET TARGET GROUP STATES command response.
+ * Currently we're only setting the current target port group state
+ * to 'active/optimized', letting the array firmware figure out
+ * the states of the remaining groups.
*/
-static void stpg_endio(struct request *req, int error)
+static int submit_stpg(struct scsi_device *sdev, int group_id,
+ struct scsi_sense_hdr *sshdr)
{
- struct alua_dh_data *h = req->end_io_data;
- struct scsi_sense_hdr sense_hdr;
- unsigned err = SCSI_DH_OK;
+ u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)];
+ unsigned char stpg_data[8];
+ int stpg_len = 8;
+ int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
- if (host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE) {
- err = SCSI_DH_IO;
- goto done;
+ /* Prepare the data buffer */
+ memset(stpg_data, 0, stpg_len);
+ stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
+ put_unaligned_be16(group_id, &stpg_data[6]);
+
+ /* Prepare the command. */
+ memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT));
+ cdb[0] = MAINTENANCE_OUT;
+ cdb[1] = MO_SET_TARGET_PGS;
+ put_unaligned_be32(stpg_len, &cdb[6]);
+
+ return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
+ stpg_data, stpg_len,
+ sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, NULL, req_flags);
+}
+
+struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
+ int group_id)
+{
+ struct alua_port_group *pg;
+
+ list_for_each_entry(pg, &port_group_list, node) {
+ if (pg->group_id != group_id)
+ continue;
+ if (pg->device_id_len != id_size)
+ continue;
+ if (strncmp(pg->device_id_str, id_str, id_size))
+ continue;
+ if (!kref_get_unless_zero(&pg->kref))
+ continue;
+ return pg;
}
- if (scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
- &sense_hdr)) {
- if (sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
- /* ALUA state transition already in progress */
- err = SCSI_DH_OK;
- goto done;
- }
- if (sense_hdr.sense_key == UNIT_ATTENTION) {
- err = SCSI_DH_RETRY;
- goto done;
- }
- sdev_printk(KERN_INFO, h->sdev, "%s: stpg failed\n",
- ALUA_DH_NAME);
- scsi_print_sense_hdr(h->sdev, ALUA_DH_NAME, &sense_hdr);
- err = SCSI_DH_IO;
- } else if (error)
- err = SCSI_DH_IO;
-
- if (err == SCSI_DH_OK) {
- h->state = TPGS_STATE_OPTIMIZED;
- sdev_printk(KERN_INFO, h->sdev,
- "%s: port group %02x switched to state %c\n",
- ALUA_DH_NAME, h->group_id,
- print_alua_state(h->state));
- }
-done:
- req->end_io_data = NULL;
- __blk_put_request(req->q, req);
- if (h->callback_fn) {
- h->callback_fn(h->callback_data, err);
- h->callback_fn = h->callback_data = NULL;
- }
- return;
+ return NULL;
}
/*
- * submit_stpg - Issue a SET TARGET GROUP STATES command
+ * alua_alloc_pg - Allocate a new port_group structure
+ * @sdev: scsi device
+ * @h: alua device_handler data
+ * @group_id: port group id
*
- * Currently we're only setting the current target port group state
- * to 'active/optimized' and let the array firmware figure out
- * the states of the remaining groups.
+ * Allocate a new port_group structure for a given
+ * device.
*/
-static unsigned submit_stpg(struct alua_dh_data *h)
+struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
+ int group_id, int tpgs)
{
- struct request *rq;
- int stpg_len = 8;
- struct scsi_device *sdev = h->sdev;
+ struct alua_port_group *pg, *tmp_pg;
- /* Prepare the data buffer */
- memset(h->buff, 0, stpg_len);
- h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
- put_unaligned_be16(h->group_id, &h->buff[6]);
-
- rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
- if (!rq)
- return SCSI_DH_RES_TEMP_UNAVAIL;
+ pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
+ if (!pg)
+ return ERR_PTR(-ENOMEM);
- /* Prepare the command. */
- rq->cmd[0] = MAINTENANCE_OUT;
- rq->cmd[1] = MO_SET_TARGET_PGS;
- put_unaligned_be32(stpg_len, &rq->cmd[6]);
- rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
+ pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
+ sizeof(pg->device_id_str));
+ if (pg->device_id_len <= 0) {
+ /*
+ * Internal error: TPGS supported but no device
+ * identification found. Disable ALUA support.
+ */
+ kfree(pg);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: No device descriptors found\n",
+ ALUA_DH_NAME);
+ return ERR_PTR(-ENXIO);
+ }
+ pg->group_id = group_id;
+ pg->tpgs = tpgs;
+ pg->state = SCSI_ACCESS_STATE_OPTIMAL;
+ if (optimize_stpg)
+ pg->flags |= ALUA_OPTIMIZE_STPG;
+ kref_init(&pg->kref);
+ INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
+ INIT_LIST_HEAD(&pg->rtpg_list);
+ INIT_LIST_HEAD(&pg->node);
+ INIT_LIST_HEAD(&pg->dh_list);
+ spin_lock_init(&pg->lock);
+
+ spin_lock(&port_group_lock);
+ tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
+ group_id);
+ if (tmp_pg) {
+ spin_unlock(&port_group_lock);
+ kfree(pg);
+ return tmp_pg;
+ }
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
- rq->end_io_data = h;
+ list_add(&pg->node, &port_group_list);
+ spin_unlock(&port_group_lock);
- blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
- return SCSI_DH_OK;
+ return pg;
}
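
alua_alloc_pg() above also shows the standard optimistic-allocation idiom for
shared objects: build the candidate fully outside the lock, then recheck under
the lock for a concurrently inserted duplicate and either adopt the existing
object or publish the new one. Schematically, with obj_find_get() and the
list/lock as hypothetical stand-ins:

	static struct obj *obj_get_or_create(int id)
	{
		struct obj *o, *tmp;

		o = kzalloc(sizeof(*o), GFP_KERNEL);	/* may sleep: done unlocked */
		if (!o)
			return ERR_PTR(-ENOMEM);
		o->id = id;
		kref_init(&o->kref);

		spin_lock(&obj_list_lock);
		tmp = obj_find_get(id);			/* returns with a reference */
		if (tmp) {
			spin_unlock(&obj_list_lock);
			kfree(o);			/* lost the race: use the winner */
			return tmp;
		}
		list_add(&o->node, &obj_list);
		spin_unlock(&obj_list_lock);
		return o;
	}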
/*
@@ -318,9 +327,13 @@ static int alua_check_tpgs(struct scsi_device *sdev)
* Extract the relative target port and the target port group
* descriptor from the list of identifiers.
*/
-static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
+ int tpgs)
{
int rel_port = -1, group_id;
+ struct alua_port_group *pg, *old_pg = NULL;
+ bool pg_updated;
+ unsigned long flags;
group_id = scsi_vpd_tpg_id(sdev, &rel_port);
if (group_id < 0) {
@@ -334,32 +347,63 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h)
ALUA_DH_NAME);
return SCSI_DH_DEV_UNSUPP;
}
- h->state = TPGS_STATE_OPTIMIZED;
- h->group_id = group_id;
+ pg = alua_alloc_pg(sdev, group_id, tpgs);
+ if (IS_ERR(pg)) {
+ if (PTR_ERR(pg) == -ENOMEM)
+ return SCSI_DH_NOMEM;
+ return SCSI_DH_DEV_UNSUPP;
+ }
sdev_printk(KERN_INFO, sdev,
- "%s: port group %02x rel port %02x\n",
- ALUA_DH_NAME, h->group_id, h->rel_port);
+ "%s: device %s port group %x rel port %x\n",
+ ALUA_DH_NAME, pg->device_id_str, group_id, rel_port);
+
+ /* Check for existing port group references */
+ spin_lock(&h->pg_lock);
+ old_pg = h->pg;
+ if (old_pg != pg) {
+ /* port group has changed. Update to new port group */
+ if (h->pg) {
+ spin_lock_irqsave(&old_pg->lock, flags);
+ list_del_rcu(&h->node);
+ spin_unlock_irqrestore(&old_pg->lock, flags);
+ }
+ rcu_assign_pointer(h->pg, pg);
+ pg_updated = true;
+ }
- return 0;
+ spin_lock_irqsave(&pg->lock, flags);
+ if (sdev->synchronous_alua)
+ pg->flags |= ALUA_SYNC_STPG;
+ if (pg_updated)
+ list_add_rcu(&h->node, &pg->dh_list);
+ spin_unlock_irqrestore(&pg->lock, flags);
+
+ alua_rtpg_queue(h->pg, sdev, NULL, true);
+ spin_unlock(&h->pg_lock);
+
+ if (old_pg)
+ kref_put(&old_pg->kref, release_port_group);
+
+ return SCSI_DH_OK;
}
-static char print_alua_state(int state)
+static char print_alua_state(unsigned char state)
{
switch (state) {
- case TPGS_STATE_OPTIMIZED:
+ case SCSI_ACCESS_STATE_OPTIMAL:
return 'A';
- case TPGS_STATE_NONOPTIMIZED:
+ case SCSI_ACCESS_STATE_ACTIVE:
return 'N';
- case TPGS_STATE_STANDBY:
+ case SCSI_ACCESS_STATE_STANDBY:
return 'S';
- case TPGS_STATE_UNAVAILABLE:
+ case SCSI_ACCESS_STATE_UNAVAILABLE:
return 'U';
- case TPGS_STATE_LBA_DEPENDENT:
+ case SCSI_ACCESS_STATE_LBA:
return 'L';
- case TPGS_STATE_OFFLINE:
+ case SCSI_ACCESS_STATE_OFFLINE:
return 'O';
- case TPGS_STATE_TRANSITIONING:
+ case SCSI_ACCESS_STATE_TRANSITIONING:
return 'T';
default:
return 'X';
@@ -371,18 +415,24 @@ static int alua_check_sense(struct scsi_device *sdev,
{
switch (sense_hdr->sense_key) {
case NOT_READY:
- if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
/*
* LUN Not Accessible - ALUA state transition
*/
- return ADD_TO_MLQUEUE;
+ alua_check(sdev, false);
+ return NEEDS_RETRY;
+ }
break;
case UNIT_ATTENTION:
- if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
/*
- * Power On, Reset, or Bus Device Reset, just retry.
+ * Power On, Reset, or Bus Device Reset.
+ * Might have obscured a state transition,
+ * so schedule a recheck.
*/
+ alua_check(sdev, true);
return ADD_TO_MLQUEUE;
+ }
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
/*
* Device internal reset
@@ -393,16 +443,20 @@ static int alua_check_sense(struct scsi_device *sdev,
* Mode Parameters Changed
*/
return ADD_TO_MLQUEUE;
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
/*
* ALUA state changed
*/
+ alua_check(sdev, true);
return ADD_TO_MLQUEUE;
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07)
+ }
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
/*
* Implicit ALUA state transition failed
*/
+ alua_check(sdev, true);
return ADD_TO_MLQUEUE;
+ }
if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
/*
* Inquiry data has changed
@@ -422,38 +476,71 @@ static int alua_check_sense(struct scsi_device *sdev,
}
/*
+ * alua_tur - Send a TEST UNIT READY
+ * @sdev: device to which the TEST UNIT READY command should be sent
+ *
+ * Send a TEST UNIT READY to @sdev to figure out the device state
+ * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING,
+ * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
+ */
+static int alua_tur(struct scsi_device *sdev)
+{
+ struct scsi_sense_hdr sense_hdr;
+ int retval;
+
+ retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, &sense_hdr);
+ if (sense_hdr.sense_key == NOT_READY &&
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
+ return SCSI_DH_RETRY;
+ else if (retval)
+ return SCSI_DH_IO;
+ else
+ return SCSI_DH_OK;
+}
+
+/*
* alua_rtpg - Evaluate REPORT TARGET GROUP STATES
* @sdev: the device to be evaluated.
- * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
*
* Evaluate the Target Port Group State.
* Returns SCSI_DH_DEV_OFFLINED if the path is
* found to be unusable.
*/
-static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
+static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
- int len, k, off, valid_states = 0;
- unsigned char *ucp;
+ struct alua_port_group *tmp_pg;
+ int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
+ unsigned char *desc, *buff;
unsigned err, retval;
- unsigned long expiry, interval = 0;
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
+ unsigned long flags;
- if (!h->transition_tmo)
- expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
- else
- expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);
+ if (!pg->expiry) {
+ unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
+
+ if (pg->transition_tmo)
+ transition_tmo = pg->transition_tmo * HZ;
+
+ pg->expiry = round_jiffies_up(jiffies + transition_tmo);
+ }
+
+ buff = kzalloc(bufflen, GFP_KERNEL);
+ if (!buff)
+ return SCSI_DH_DEV_TEMP_BUSY;
retry:
- retval = submit_rtpg(sdev, h);
+ retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
+
if (retval) {
- if (!scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
- &sense_hdr)) {
+ if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
ALUA_DH_NAME, retval);
- if (driver_byte(retval) == DRIVER_BUSY)
+ kfree(buff);
+ if (driver_byte(retval) == DRIVER_ERROR)
return SCSI_DH_DEV_TEMP_BUSY;
return SCSI_DH_IO;
}
@@ -466,10 +553,10 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
* The retry without rtpg_ext_hdr_req set
* handles this.
*/
- if (!(h->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
+ if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
sense_hdr.sense_key == ILLEGAL_REQUEST &&
sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
- h->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
+ pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
goto retry;
}
/*
@@ -481,65 +568,96 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
err = SCSI_DH_RETRY;
else if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY;
- if (err == SCSI_DH_RETRY && time_before(jiffies, expiry)) {
+ if (err == SCSI_DH_RETRY &&
+ pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
ALUA_DH_NAME);
scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
- goto retry;
+ return err;
}
sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
ALUA_DH_NAME);
scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
+ kfree(buff);
+ pg->expiry = 0;
return SCSI_DH_IO;
}
- len = get_unaligned_be32(&h->buff[0]) + 4;
+ len = get_unaligned_be32(&buff[0]) + 4;
- if (len > h->bufflen) {
+ if (len > bufflen) {
/* Resubmit with the correct length */
- if (realloc_buffer(h, len)) {
+ kfree(buff);
+ bufflen = len;
+ buff = kmalloc(bufflen, GFP_KERNEL);
+ if (!buff) {
sdev_printk(KERN_WARNING, sdev,
"%s: kmalloc buffer failed\n",__func__);
/* Temporary failure, bypass */
+ pg->expiry = 0;
return SCSI_DH_DEV_TEMP_BUSY;
}
goto retry;
}
- orig_transition_tmo = h->transition_tmo;
- if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
- h->transition_tmo = h->buff[5];
+ orig_transition_tmo = pg->transition_tmo;
+ if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
+ pg->transition_tmo = buff[5];
else
- h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
+ pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;
- if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
+ if (orig_transition_tmo != pg->transition_tmo) {
sdev_printk(KERN_INFO, sdev,
"%s: transition timeout set to %d seconds\n",
- ALUA_DH_NAME, h->transition_tmo);
- expiry = jiffies + h->transition_tmo * HZ;
+ ALUA_DH_NAME, pg->transition_tmo);
+ pg->expiry = jiffies + pg->transition_tmo * HZ;
}
- if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
+ if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
tpg_desc_tbl_off = 8;
else
tpg_desc_tbl_off = 4;
- for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
+ for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
k < len;
- k += off, ucp += off) {
-
- if (h->group_id == get_unaligned_be16(&ucp[2])) {
- h->state = ucp[0] & 0x0f;
- h->pref = ucp[0] >> 7;
- valid_states = ucp[1];
+ k += off, desc += off) {
+ u16 group_id = get_unaligned_be16(&desc[2]);
+
+ spin_lock_irqsave(&port_group_lock, flags);
+ tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
+ group_id);
+ spin_unlock_irqrestore(&port_group_lock, flags);
+ if (tmp_pg) {
+ if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
+ if ((tmp_pg == pg) ||
+ !(tmp_pg->flags & ALUA_PG_RUNNING)) {
+ struct alua_dh_data *h;
+
+ tmp_pg->state = desc[0] & 0x0f;
+ tmp_pg->pref = desc[0] >> 7;
+ rcu_read_lock();
+ list_for_each_entry_rcu(h,
+ &tmp_pg->dh_list, node) {
+ /* h->sdev should always be valid */
+ BUG_ON(!h->sdev);
+ h->sdev->access_state = desc[0];
+ }
+ rcu_read_unlock();
+ }
+ if (tmp_pg == pg)
+ valid_states = desc[1];
+ spin_unlock_irqrestore(&tmp_pg->lock, flags);
+ }
+ kref_put(&tmp_pg->kref, release_port_group);
}
- off = 8 + (ucp[7] * 4);
+ off = 8 + (desc[7] * 4);
}
+ spin_lock_irqsave(&pg->lock, flags);
sdev_printk(KERN_INFO, sdev,
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
- ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
- h->pref ? "preferred" : "non-preferred",
+ ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
+ pg->pref ? "preferred" : "non-preferred",
valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
@@ -548,36 +666,236 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
- switch (h->state) {
- case TPGS_STATE_TRANSITIONING:
- if (wait_for_transition) {
- if (time_before(jiffies, expiry)) {
- /* State transition, retry */
- interval += 2000;
- msleep(interval);
- goto retry;
- }
+ switch (pg->state) {
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ if (time_before(jiffies, pg->expiry)) {
+ /* State transition, retry */
+ pg->interval = 2;
err = SCSI_DH_RETRY;
} else {
- err = SCSI_DH_OK;
- }
+ struct alua_dh_data *h;
- /* Transitioning time exceeded, set port to standby */
- h->state = TPGS_STATE_STANDBY;
+ /* Transitioning time exceeded, set port to standby */
+ err = SCSI_DH_IO;
+ pg->state = SCSI_ACCESS_STATE_STANDBY;
+ pg->expiry = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &pg->dh_list, node) {
+ BUG_ON(!h->sdev);
+ h->sdev->access_state =
+ (pg->state & SCSI_ACCESS_STATE_MASK);
+ if (pg->pref)
+ h->sdev->access_state |=
+ SCSI_ACCESS_STATE_PREFERRED;
+ }
+ rcu_read_unlock();
+ }
break;
- case TPGS_STATE_OFFLINE:
+ case SCSI_ACCESS_STATE_OFFLINE:
/* Path unusable */
err = SCSI_DH_DEV_OFFLINED;
+ pg->expiry = 0;
break;
default:
/* Usable path if active */
err = SCSI_DH_OK;
+ pg->expiry = 0;
break;
}
+ spin_unlock_irqrestore(&pg->lock, flags);
+ kfree(buff);
return err;
}
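
The descriptor loop above follows the REPORT TARGET PORT GROUPS payload layout:
a 4-byte big-endian length header (which excludes itself, hence the +4), an
optional 4-byte extended header, then variable-size group descriptors of
8 + 4 * desc[7] bytes each. The walk in isolation, as an illustrative helper
reusing the driver's RTPG_FMT_* constants:

	static void walk_rtpg(const unsigned char *buff, int bufflen)
	{
		int len = get_unaligned_be32(&buff[0]) + 4;
		int off = ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) ? 8 : 4;

		while (off < len && off < bufflen) {
			const unsigned char *desc = buff + off;

			pr_debug("pg %u state %#x pref %u supports %#x\n",
				 get_unaligned_be16(&desc[2]),	/* bytes 2..3: group id */
				 desc[0] & 0x0f,		/* low nibble: access state */
				 desc[0] >> 7,			/* bit 7: preferred bit */
				 desc[1]);			/* supported-states bitmap */
			off += 8 + desc[7] * 4;	/* 8 + one 4-byte entry per port */
		}
	}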
/*
+ * alua_stpg - Issue a SET TARGET PORT GROUP command
+ *
+ * Issue a SET TARGET PORT GROUP command and evaluate the
+ * response. Returns SCSI_DH_RETRY per default to trigger
+ * a re-evaluation of the target group state or SCSI_DH_OK
+ * if no further action needs to be taken.
+ */
+static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
+{
+ int retval;
+ struct scsi_sense_hdr sense_hdr;
+
+ if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
+ /* Only implicit ALUA supported, retry */
+ return SCSI_DH_RETRY;
+ }
+ switch (pg->state) {
+ case SCSI_ACCESS_STATE_OPTIMAL:
+ return SCSI_DH_OK;
+ case SCSI_ACCESS_STATE_ACTIVE:
+ if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
+ !pg->pref &&
+ (pg->tpgs & TPGS_MODE_IMPLICIT))
+ return SCSI_DH_OK;
+ break;
+ case SCSI_ACCESS_STATE_STANDBY:
+ case SCSI_ACCESS_STATE_UNAVAILABLE:
+ break;
+ case SCSI_ACCESS_STATE_OFFLINE:
+ return SCSI_DH_IO;
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ break;
+ default:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg failed, unhandled TPGS state %d",
+ ALUA_DH_NAME, pg->state);
+ return SCSI_DH_NOSYS;
+ }
+ retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
+
+ if (retval) {
+ if (!scsi_sense_valid(&sense_hdr)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg failed, result %d",
+ ALUA_DH_NAME, retval);
+ if (driver_byte(retval) == DRIVER_ERROR)
+ return SCSI_DH_DEV_TEMP_BUSY;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
+ ALUA_DH_NAME);
+ scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
+ }
+ }
+ /* Retry RTPG */
+ return SCSI_DH_RETRY;
+}
+
+static void alua_rtpg_work(struct work_struct *work)
+{
+ struct alua_port_group *pg =
+ container_of(work, struct alua_port_group, rtpg_work.work);
+ struct scsi_device *sdev;
+ LIST_HEAD(qdata_list);
+ int err = SCSI_DH_OK;
+ struct alua_queue_data *qdata, *tmp;
+ unsigned long flags;
+ struct workqueue_struct *alua_wq = kaluad_wq;
+
+ spin_lock_irqsave(&pg->lock, flags);
+ sdev = pg->rtpg_sdev;
+ if (!sdev) {
+ WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
+ WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
+ spin_unlock_irqrestore(&pg->lock, flags);
+ return;
+ }
+ if (pg->flags & ALUA_SYNC_STPG)
+ alua_wq = kaluad_sync_wq;
+ pg->flags |= ALUA_PG_RUNNING;
+ if (pg->flags & ALUA_PG_RUN_RTPG) {
+ int state = pg->state;
+
+ pg->flags &= ~ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
+ if (alua_tur(sdev) == SCSI_DH_RETRY) {
+ spin_lock_irqsave(&pg->lock, flags);
+ pg->flags &= ~ALUA_PG_RUNNING;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(alua_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+ return;
+ }
+ /* Send RTPG on failure or if TUR indicates success */
+ }
+ err = alua_rtpg(sdev, pg);
+ spin_lock_irqsave(&pg->lock, flags);
+ if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+ pg->flags &= ~ALUA_PG_RUNNING;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(alua_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+ return;
+ }
+ if (err != SCSI_DH_OK)
+ pg->flags &= ~ALUA_PG_RUN_STPG;
+ }
+ if (pg->flags & ALUA_PG_RUN_STPG) {
+ pg->flags &= ~ALUA_PG_RUN_STPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ err = alua_stpg(sdev, pg);
+ spin_lock_irqsave(&pg->lock, flags);
+ if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ pg->interval = 0;
+ pg->flags &= ~ALUA_PG_RUNNING;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(alua_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+ return;
+ }
+ }
+
+ list_splice_init(&pg->rtpg_list, &qdata_list);
+ pg->rtpg_sdev = NULL;
+ spin_unlock_irqrestore(&pg->lock, flags);
+
+ list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
+ list_del(&qdata->entry);
+ if (qdata->callback_fn)
+ qdata->callback_fn(qdata->callback_data, err);
+ kfree(qdata);
+ }
+ spin_lock_irqsave(&pg->lock, flags);
+ pg->flags &= ~ALUA_PG_RUNNING;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ scsi_device_put(sdev);
+ kref_put(&pg->kref, release_port_group);
+}
+
+static void alua_rtpg_queue(struct alua_port_group *pg,
+ struct scsi_device *sdev,
+ struct alua_queue_data *qdata, bool force)
+{
+ int start_queue = 0;
+ unsigned long flags;
+ struct workqueue_struct *alua_wq = kaluad_wq;
+
+ if (!pg)
+ return;
+
+ spin_lock_irqsave(&pg->lock, flags);
+ if (qdata) {
+ list_add_tail(&qdata->entry, &pg->rtpg_list);
+ pg->flags |= ALUA_PG_RUN_STPG;
+ force = true;
+ }
+ if (pg->rtpg_sdev == NULL) {
+ pg->interval = 0;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ kref_get(&pg->kref);
+ pg->rtpg_sdev = sdev;
+ scsi_device_get(sdev);
+ start_queue = 1;
+ } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ /* Do not queue if the worker is already running */
+ if (!(pg->flags & ALUA_PG_RUNNING)) {
+ kref_get(&pg->kref);
+ start_queue = 1;
+ }
+ }
+
+ if (pg->flags & ALUA_SYNC_STPG)
+ alua_wq = kaluad_sync_wq;
+ spin_unlock_irqrestore(&pg->lock, flags);
+
+ if (start_queue &&
+ !queue_delayed_work(alua_wq, &pg->rtpg_work,
+ msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
+ scsi_device_put(sdev);
+ kref_put(&pg->kref, release_port_group);
+ }
+}
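
alua_rtpg_queue() above pairs every successful queue_delayed_work() with a
scsi_device and port group reference, and drops them again when queueing fails
because the work was already pending, so each queued work item owns exactly one
reference. The contract in miniature (hypothetical object embedding a kref and
a struct delayed_work dwork; system_wq used for brevity):

	static void obj_kick(struct obj *o, unsigned long delay)
	{
		kref_get(&o->kref);
		if (!queue_delayed_work(system_wq, &o->dwork, delay))
			kref_put(&o->kref, obj_release);	/* work was already pending */
	}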
+
+/*
* alua_initialize - Initialize ALUA state
* @sdev: the device to be initialized
*
@@ -586,21 +904,14 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
*/
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
- int err = SCSI_DH_DEV_UNSUPP;
-
- h->tpgs = alua_check_tpgs(sdev);
- if (h->tpgs == TPGS_MODE_NONE)
- goto out;
-
- err = alua_check_vpd(sdev, h);
- if (err != SCSI_DH_OK)
- goto out;
-
- err = alua_rtpg(sdev, h, 0);
- if (err != SCSI_DH_OK)
- goto out;
-
-out:
+ int err = SCSI_DH_DEV_UNSUPP, tpgs;
+
+ mutex_lock(&h->init_mutex);
+ tpgs = alua_check_tpgs(sdev);
+ if (tpgs != TPGS_MODE_NONE)
+ err = alua_check_vpd(sdev, h, tpgs);
+ h->init_error = err;
+ mutex_unlock(&h->init_mutex);
return err;
}
/*
@@ -615,9 +926,11 @@ out:
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group __rcu *pg = NULL;
unsigned int optimize = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
+ unsigned long flags;
if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
return -EINVAL;
@@ -627,18 +940,23 @@ static int alua_set_params(struct scsi_device *sdev, const char *params)
if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
return -EINVAL;
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (!pg) {
+ rcu_read_unlock();
+ return -ENXIO;
+ }
+ spin_lock_irqsave(&pg->lock, flags);
if (optimize)
- h->flags |= ALUA_OPTIMIZE_STPG;
+ pg->flags |= ALUA_OPTIMIZE_STPG;
else
- h->flags &= ~ALUA_OPTIMIZE_STPG;
+ pg->flags &= ~ALUA_OPTIMIZE_STPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ rcu_read_unlock();
return result;
}
-static uint optimize_stpg;
-module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
-
/*
* alua_activate - activate a path
* @sdev: device on the path to be activated
@@ -654,48 +972,33 @@ static int alua_activate(struct scsi_device *sdev,
{
struct alua_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
- int stpg = 0;
+ struct alua_queue_data *qdata;
+ struct alua_port_group __rcu *pg;
- err = alua_rtpg(sdev, h, 1);
- if (err != SCSI_DH_OK)
+ qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
+ if (!qdata) {
+ err = SCSI_DH_RES_TEMP_UNAVAIL;
goto out;
-
- if (optimize_stpg)
- h->flags |= ALUA_OPTIMIZE_STPG;
-
- if (h->tpgs & TPGS_MODE_EXPLICIT) {
- switch (h->state) {
- case TPGS_STATE_NONOPTIMIZED:
- stpg = 1;
- if ((h->flags & ALUA_OPTIMIZE_STPG) &&
- (!h->pref) &&
- (h->tpgs & TPGS_MODE_IMPLICIT))
- stpg = 0;
- break;
- case TPGS_STATE_STANDBY:
- case TPGS_STATE_UNAVAILABLE:
- stpg = 1;
- break;
- case TPGS_STATE_OFFLINE:
- err = SCSI_DH_IO;
- break;
- case TPGS_STATE_TRANSITIONING:
- err = SCSI_DH_RETRY;
- break;
- default:
- break;
- }
}
-
- if (stpg) {
- h->callback_fn = fn;
- h->callback_data = data;
- err = submit_stpg(h);
- if (err == SCSI_DH_OK)
- return 0;
- h->callback_fn = h->callback_data = NULL;
+ qdata->callback_fn = fn;
+ qdata->callback_data = data;
+
+ mutex_lock(&h->init_mutex);
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (!pg || !kref_get_unless_zero(&pg->kref)) {
+ rcu_read_unlock();
+ kfree(qdata);
+ err = h->init_error;
+ mutex_unlock(&h->init_mutex);
+ goto out;
}
+ fn = NULL;
+ rcu_read_unlock();
+ mutex_unlock(&h->init_mutex);
+ alua_rtpg_queue(pg, sdev, qdata, true);
+ kref_put(&pg->kref, release_port_group);
out:
if (fn)
fn(data, err);
@@ -703,6 +1006,29 @@ out:
}
/*
+ * alua_check - check path status
+ * @sdev: device on the path to be checked
+ *
+ * Check the device status
+ */
+static void alua_check(struct scsi_device *sdev, bool force)
+{
+ struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group *pg;
+
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (!pg || !kref_get_unless_zero(&pg->kref)) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ alua_rtpg_queue(pg, sdev, NULL, force);
+ kref_put(&pg->kref, release_port_group);
+}
+
+/*
* alua_prep_fn - request callback
*
* Fail I/O to all paths not in state
@@ -711,13 +1037,20 @@ out:
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group __rcu *pg;
+ unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
int ret = BLKPREP_OK;
- if (h->state == TPGS_STATE_TRANSITIONING)
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (pg)
+ state = pg->state;
+ rcu_read_unlock();
+ if (state == SCSI_ACCESS_STATE_TRANSITIONING)
ret = BLKPREP_DEFER;
- else if (h->state != TPGS_STATE_OPTIMIZED &&
- h->state != TPGS_STATE_NONOPTIMIZED &&
- h->state != TPGS_STATE_LBA_DEPENDENT) {
+ else if (state != SCSI_ACCESS_STATE_OPTIMAL &&
+ state != SCSI_ACCESS_STATE_ACTIVE &&
+ state != SCSI_ACCESS_STATE_LBA) {
ret = BLKPREP_KILL;
req->cmd_flags |= REQ_QUIET;
}
@@ -725,6 +1058,13 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
}
+static void alua_rescan(struct scsi_device *sdev)
+{
+ struct alua_dh_data *h = sdev->handler_data;
+
+ alua_initialize(sdev, h);
+}
+
/*
* alua_bus_attach - Attach device handler
* @sdev: device to be attached to
@@ -732,20 +1072,21 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
static int alua_bus_attach(struct scsi_device *sdev)
{
struct alua_dh_data *h;
- int err;
+ int err, ret = -EINVAL;
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
return -ENOMEM;
- h->tpgs = TPGS_MODE_UNINITIALIZED;
- h->state = TPGS_STATE_OPTIMIZED;
- h->group_id = -1;
- h->rel_port = -1;
- h->buff = h->inq;
- h->bufflen = ALUA_INQUIRY_SIZE;
+ spin_lock_init(&h->pg_lock);
+ rcu_assign_pointer(h->pg, NULL);
+ h->init_error = SCSI_DH_OK;
h->sdev = sdev;
+ INIT_LIST_HEAD(&h->node);
+ mutex_init(&h->init_mutex);
err = alua_initialize(sdev, h);
+ if (err == SCSI_DH_NOMEM)
+ ret = -ENOMEM;
if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
goto failed;
@@ -753,7 +1094,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
return 0;
failed:
kfree(h);
- return -EINVAL;
+ return ret;
}
/*
@@ -763,9 +1104,19 @@ failed:
static void alua_bus_detach(struct scsi_device *sdev)
{
struct alua_dh_data *h = sdev->handler_data;
-
- if (h->buff && h->inq != h->buff)
- kfree(h->buff);
+ struct alua_port_group *pg;
+
+ spin_lock(&h->pg_lock);
+ pg = h->pg;
+ rcu_assign_pointer(h->pg, NULL);
+ h->sdev = NULL;
+ spin_unlock(&h->pg_lock);
+ if (pg) {
+ spin_lock(&pg->lock);
+ list_del_rcu(&h->node);
+ spin_unlock(&pg->lock);
+ kref_put(&pg->kref, release_port_group);
+ }
sdev->handler_data = NULL;
kfree(h);
}
@@ -778,6 +1129,7 @@ static struct scsi_device_handler alua_dh = {
.prep_fn = alua_prep_fn,
.check_sense = alua_check_sense,
.activate = alua_activate,
+ .rescan = alua_rescan,
.set_params = alua_set_params,
};
@@ -785,16 +1137,31 @@ static int __init alua_init(void)
{
int r;
+ kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+ if (!kaluad_wq) {
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ kaluad_sync_wq = create_workqueue("kaluad_sync");
+ if (!kaluad_sync_wq) {
+ destroy_workqueue(kaluad_wq);
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
r = scsi_register_device_handler(&alua_dh);
- if (r != 0)
+ if (r != 0) {
printk(KERN_ERR "%s: Failed to register scsi device handler",
ALUA_DH_NAME);
+ destroy_workqueue(kaluad_sync_wq);
+ destroy_workqueue(kaluad_wq);
+ }
return r;
}
static void __exit alua_exit(void)
{
scsi_unregister_device_handler(&alua_dh);
+ destroy_workqueue(kaluad_sync_wq);
+ destroy_workqueue(kaluad_wq);
}
module_init(alua_init);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e6fb97cb12f4..375d81850f15 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -199,7 +199,12 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
csdev->lun_state = csdev->buffer[4];
csdev->current_sp = csdev->buffer[8];
csdev->port = csdev->buffer[7];
-
+ if (csdev->lun_state == CLARIION_LUN_OWNED)
+ sdev->access_state = SCSI_ACCESS_STATE_OPTIMAL;
+ else
+ sdev->access_state = SCSI_ACCESS_STATE_STANDBY;
+ if (csdev->default_sp == csdev->current_sp)
+ sdev->access_state |= SCSI_ACCESS_STATE_PREFERRED;
out:
return err;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 93880ed6291c..06fbd0b0c68a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -165,6 +165,7 @@ struct rdac_controller {
struct work_struct ms_work;
struct scsi_device *ms_sdev;
struct list_head ms_head;
+ struct list_head dh_list;
};
struct c2_inquiry {
@@ -181,7 +182,9 @@ struct c2_inquiry {
};
struct rdac_dh_data {
+ struct list_head node;
struct rdac_controller *ctlr;
+ struct scsi_device *sdev;
#define UNINITIALIZED_LUN (1 << 8)
unsigned lun;
@@ -392,6 +395,7 @@ static struct rdac_controller *get_controller(int index, char *array_name,
INIT_WORK(&ctlr->ms_work, send_mode_select);
INIT_LIST_HEAD(&ctlr->ms_head);
list_add(&ctlr->node, &ctlr_list);
+ INIT_LIST_HEAD(&ctlr->dh_list);
return ctlr;
}
@@ -455,7 +459,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
- int err;
+ int err, access_state;
+ struct rdac_dh_data *tmp;
struct c9_inquiry *inqp;
h->state = RDAC_STATE_ACTIVE;
@@ -471,19 +476,31 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
h->mode = RDAC_MODE; /* LUN in RDAC mode */
/* Update ownership */
- if (inqp->avte_cvp & 0x1)
+ if (inqp->avte_cvp & 0x1) {
h->lun_state = RDAC_LUN_OWNED;
- else {
+ access_state = SCSI_ACCESS_STATE_OPTIMAL;
+ } else {
h->lun_state = RDAC_LUN_UNOWNED;
- if (h->mode == RDAC_MODE)
+ if (h->mode == RDAC_MODE) {
h->state = RDAC_STATE_PASSIVE;
+ access_state = SCSI_ACCESS_STATE_STANDBY;
+ } else
+ access_state = SCSI_ACCESS_STATE_ACTIVE;
}
/* Update path prio*/
- if (inqp->path_prio & 0x1)
+ if (inqp->path_prio & 0x1) {
h->preferred = RDAC_PREFERRED;
- else
+ access_state |= SCSI_ACCESS_STATE_PREFERRED;
+ } else
h->preferred = RDAC_NON_PREFERRED;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
+ /* tmp->sdev should always be valid */
+ BUG_ON(!tmp->sdev);
+ tmp->sdev->access_state = access_state;
+ }
+ rcu_read_unlock();
}
return err;
@@ -508,6 +525,10 @@ static int initialize_controller(struct scsi_device *sdev,
h->ctlr = get_controller(index, array_name, array_id, sdev);
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
+ else {
+ list_add_rcu(&h->node, &h->ctlr->dh_list);
+ h->sdev = sdev;
+ }
spin_unlock(&list_lock);
}
return err;
@@ -829,8 +850,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
flush_workqueue(kmpath_rdacd);
spin_lock(&list_lock);
- if (h->ctlr)
+ if (h->ctlr) {
+ list_del_rcu(&h->node);
+ h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller);
+ }
spin_unlock(&list_lock);
sdev->handler_data = NULL;
kfree(h);
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index d4cda5e9600e..21c8d210c456 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -180,11 +180,14 @@ static u8 adpt_read_blink_led(adpt_hba* host)
*============================================================================
*/
+#ifdef MODULE
static struct pci_device_id dptids[] = {
{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, }
};
+#endif
+
MODULE_DEVICE_TABLE(pci,dptids);
static int adpt_detect(struct scsi_host_template* sht)
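
The #ifdef MODULE guard above works because MODULE_DEVICE_TABLE() expands to
nothing for built-in code; without the guard, a table referenced only by that
macro triggers an unused-variable warning in non-modular builds. The general
shape, with made-up IDs:

	#ifdef MODULE
	static const struct pci_device_id example_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
		{ 0, }
	};
	#endif
	MODULE_DEVICE_TABLE(pci, example_ids);	/* no-op unless MODULE is set */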
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index baf913047b48..3e8483410f61 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1360,14 +1360,15 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
if (ioctl->header.channel == 0xFF) {
a = (struct esas2r_adapter *)hostdata;
} else {
- a = esas2r_adapters[ioctl->header.channel];
- if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) {
+ if (ioctl->header.channel >= MAX_ADAPTERS ||
+ esas2r_adapters[ioctl->header.channel] == NULL) {
ioctl->header.return_code = IOCTL_BAD_CHANNEL;
esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
kfree(ioctl);
return -ENOTSUPP;
}
+ a = esas2r_adapters[ioctl->header.channel];
}
switch (cmd) {
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 34a1b1f333b4..3e83d485f743 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1118,7 +1118,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
* If this is the first validated FCF, note the time and
* set a timer to trigger selection.
*/
- if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) {
+ if (mtu_valid && !fip->sel_fcf && !fip->sel_time &&
+ fcoe_ctlr_fcf_usable(fcf)) {
fip->sel_time = jiffies +
msecs_to_jiffies(FCOE_CTLR_START_DELAY);
if (!timer_pending(&fip->timer) ||
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index d7597c08fa11..641c60e8fda3 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -93,36 +93,40 @@ static struct notifier_block libfcoe_notifier = {
int fcoe_link_speed_update(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_get_netdev(lport);
- struct ethtool_cmd ecmd;
+ struct ethtool_link_ksettings ecmd;
- if (!__ethtool_get_settings(netdev, &ecmd)) {
+ if (!__ethtool_get_link_ksettings(netdev, &ecmd)) {
lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT |
FC_PORTSPEED_10GBIT |
FC_PORTSPEED_20GBIT |
FC_PORTSPEED_40GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_1000baseKX_Full))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full))
lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & (SUPPORTED_10000baseT_Full |
- SUPPORTED_10000baseKX4_Full |
- SUPPORTED_10000baseKR_Full |
- SUPPORTED_10000baseR_FEC))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseR_FEC))
lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
- if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full |
- SUPPORTED_20000baseKR2_Full))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
- if (ecmd.supported & (SUPPORTED_40000baseKR4_Full |
- SUPPORTED_40000baseCR4_Full |
- SUPPORTED_40000baseSR4_Full |
- SUPPORTED_40000baseLR4_Full))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full))
lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
- switch (ethtool_cmd_speed(&ecmd)) {
+ switch (ecmd.base.speed) {
case SPEED_1000:
lport->link_speed = FC_PORTSPEED_1GBIT;
break;
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index eefe14d453db..b87ab38a4530 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1768,7 +1768,7 @@ struct scsi_host_template fdomain_driver_template = {
};
#ifndef PCMCIA
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && defined(MODULE)
static struct pci_device_id fdomain_pci_tbl[] = {
{ PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 71e138044379..0a767740bf02 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2838,7 +2838,6 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
u16 idx, gdth_evt_data *evt)
{
gdth_evt_str *e;
- struct timeval tv;
/* no GDTH_LOCK_HA() ! */
TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
@@ -2854,8 +2853,7 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
!strcmp((char *)&ebuffer[elastidx].event_data.event_string,
(char *)&evt->event_string)))) {
e = &ebuffer[elastidx];
- do_gettimeofday(&tv);
- e->last_stamp = tv.tv_sec;
+ e->last_stamp = (u32)ktime_get_real_seconds();
++e->same_count;
} else {
if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
@@ -2871,8 +2869,7 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
e = &ebuffer[elastidx];
e->event_source = source;
e->event_idx = idx;
- do_gettimeofday(&tv);
- e->first_stamp = e->last_stamp = tv.tv_sec;
+ e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
e->same_count = 1;
e->event_data = *evt;
e->application = 0;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index e66e997992e3..be609db66807 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -148,7 +148,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
gdth_cmd_str *gdtcmd;
gdth_evt_str *estr;
char hrec[161];
- struct timeval tv;
char *buf;
gdth_dskstat_str *pds;
@@ -540,8 +539,14 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
if (estr->event_data.eu.driver.ionode == ha->hanum &&
estr->event_source == ES_ASYNC) {
gdth_log_event(&estr->event_data, hrec);
- do_gettimeofday(&tv);
- sec = (int)(tv.tv_sec - estr->first_stamp);
+
+ /*
+ * Elapsed-seconds subtraction with unsigned operands remains
+ * safe across the year-2106 wraparound: a - b executes as
+ * a + (1's complement of b) + 1, i.e. modulo 2^32.
+ */
+
+ sec = (int)((u32)ktime_get_real_seconds() - estr->first_stamp);
if (sec < 0) sec = 0;
seq_printf(m," date- %02d:%02d:%02d\t%s\n",
sec/3600, sec%3600/60, sec%60, hrec);
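
The claim in the comment above can be checked in isolation: unsigned
subtraction is defined modulo 2^32, so even when the seconds counter wraps in
2106, now - then still yields the elapsed interval. A self-contained userspace
check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t then = 0xfffffff0u;	/* 16 seconds before the 2106 wrap */
		uint32_t now  = 0x00000010u;	/* 16 seconds after it */

		printf("%u\n", now - then);	/* prints 32: the true elapsed time */
		return 0;
	}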
diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile
index 3e70eae81343..c6d3a1b5fcb9 100644
--- a/drivers/scsi/hisi_sas/Makefile
+++ b/drivers/scsi/hisi_sas/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_main.o
-obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o
+obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 5af2e4187f01..29e89f340b64 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -12,22 +12,24 @@
#ifndef _HISI_SAS_H_
#define _HISI_SAS_H_
+#include <linux/acpi.h>
#include <linux/dmapool.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
+#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
-#define DRV_VERSION "v1.0"
+#define DRV_VERSION "v1.3"
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
#define HISI_SAS_QUEUE_SLOTS 512
-#define HISI_SAS_MAX_ITCT_ENTRIES 4096
+#define HISI_SAS_MAX_ITCT_ENTRIES 2048
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
-#define HISI_SAS_COMMAND_ENTRIES 8192
#define HISI_SAS_STATUS_BUF_SZ \
(sizeof(struct hisi_sas_err_record) + 1024)
@@ -36,6 +38,11 @@
#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
+#define HISI_SAS_MAX_STP_RESP_SZ 28
+
+#define DEV_IS_EXPANDER(type) \
+ ((type == SAS_EDGE_EXPANDER_DEVICE) || \
+ (type == SAS_FANOUT_EXPANDER_DEVICE))
struct hisi_hba;
@@ -105,6 +112,7 @@ struct hisi_sas_slot {
int cmplt_queue;
int cmplt_queue_slot;
int idx;
+ int abort;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
void *status_buffer;
@@ -113,6 +121,7 @@ struct hisi_sas_slot {
dma_addr_t command_table_dma;
struct hisi_sas_sge_page *sge_page;
dma_addr_t sge_page_dma;
+ struct work_struct abort_slot;
};
struct hisi_sas_tmf_task {
@@ -132,6 +141,8 @@ struct hisi_sas_hw {
struct hisi_sas_tmf_task *tmf);
int (*prep_smp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
+ int (*prep_stp)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot);
int (*slot_complete)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int abort);
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -140,6 +151,7 @@ struct hisi_sas_hw {
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
+ int max_command_entries;
int complete_hdr_size;
};
@@ -244,18 +256,7 @@ struct hisi_sas_itct {
__le64 sas_addr;
__le64 qw2;
__le64 qw3;
- __le64 qw4;
- __le64 qw_sata_ncq0_3;
- __le64 qw_sata_ncq7_4;
- __le64 qw_sata_ncq11_8;
- __le64 qw_sata_ncq15_12;
- __le64 qw_sata_ncq19_16;
- __le64 qw_sata_ncq23_20;
- __le64 qw_sata_ncq27_24;
- __le64 qw_sata_ncq31_28;
- __le64 qw_non_ncq_iptt;
- __le64 qw_rsvd0;
- __le64 qw_rsvd1;
+ __le64 qw4_15[12];
};
struct hisi_sas_iost {
@@ -266,17 +267,7 @@ struct hisi_sas_iost {
};
struct hisi_sas_err_record {
- /* dw0 */
- __le32 dma_err_type;
-
- /* dw1 */
- __le32 trans_tx_fail_type;
-
- /* dw2 */
- __le32 trans_rx_fail_type;
-
- /* dw3 */
- u32 rsvd;
+ u32 data[4];
};
struct hisi_sas_initial_fis {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 99b1950d751c..097ab4f27a6b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -12,13 +12,12 @@
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
-#define DEV_IS_EXPANDER(type) \
- ((type == SAS_EDGE_EXPANDER_DEVICE) || \
- (type == SAS_FANOUT_EXPANDER_DEVICE))
-
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
+static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
+ u8 *lun, struct hisi_sas_tmf_task *tmf);
+
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
return device->port->ha->lldd_ha;
@@ -111,6 +110,50 @@ static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}
+static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ return hisi_hba->hw->prep_stp(hisi_hba, slot);
+}
+
+/*
+ * This function issues an abort TMF for the slot's task regardless of
+ * whether the task is still queued in the sdev, then performs the
+ * task-completion cleanup and callbacks.
+ */
+static void hisi_sas_slot_abort(struct work_struct *work)
+{
+ struct hisi_sas_slot *abort_slot =
+ container_of(work, struct hisi_sas_slot, abort_slot);
+ struct sas_task *task = abort_slot->task;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct hisi_sas_tmf_task tmf_task;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct scsi_lun lun;
+ struct device *dev = &hisi_hba->pdev->dev;
+ int tag = abort_slot->idx;
+
+ if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
+ dev_err(dev, "cannot abort slot for non-ssp task\n");
+ goto out;
+ }
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+ tmf_task.tmf = TMF_ABORT_TASK;
+ tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+ hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
+out:
+ /* Do cleanup for this task */
+ hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
+ if (task->task_done)
+ task->task_done(task);
+ if (sas_dev && sas_dev->running_req)
+ sas_dev->running_req--;
+}
+
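hisi_sas_slot_abort() above relies on the standard deferred-work pattern: the completion path runs in atomic context, but issuing a TMF may sleep, so the slot embeds a work_struct and the abort is pushed to a workqueue. A minimal sketch of the pattern (the struct and names below are illustrative, not the driver's):

#include <linux/workqueue.h>

struct demo_slot {
	int idx;
	struct work_struct abort_work;
};

static void demo_abort_fn(struct work_struct *work)
{
	/* recover the containing slot from the embedded work item */
	struct demo_slot *slot =
		container_of(work, struct demo_slot, abort_work);

	pr_info("aborting slot %d in process context\n", slot->idx);
}

/*
 * At prep time:                      INIT_WORK(&slot->abort_work, demo_abort_fn);
 * From the atomic completion path:   queue_work(wq, &slot->abort_work);
 */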
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
int is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
@@ -204,6 +247,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
slot->task = task;
slot->port = port;
task->lldd_task = slot;
+ INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
GFP_ATOMIC,
@@ -234,6 +278,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ rc = hisi_sas_task_prep_ata(hisi_hba, slot);
+ break;
default:
dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
task->task_proto);
@@ -407,6 +453,19 @@ static int hisi_sas_dev_found(struct domain_device *device)
return 0;
}
+static int hisi_sas_slave_configure(struct scsi_device *sdev)
+{
+ struct domain_device *dev = sdev_to_domain_dev(sdev);
+ int ret = sas_slave_configure(sdev);
+
+ if (ret)
+ return ret;
+ if (!dev_is_sata(dev))
+ sas_change_queue_depth(sdev, 64);
+
+ return 0;
+}
+
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
@@ -657,7 +716,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
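The hunk above stops comparing a TMF response against SAM_STAT_GOOD: a task management response carries a TMF function result rather than a SAM status, and TMF_RESP_FUNC_COMPLETE is the success code. A hedged sketch of the corrected check using the libsas task status fields:

#include <scsi/libsas.h>

static bool demo_tmf_succeeded(const struct task_status_struct *ts)
{
	/* resp says the task completed; stat carries the TMF result */
	return ts->resp == SAS_TASK_COMPLETE &&
	       ts->stat == TMF_RESP_FUNC_COMPLETE;
}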
@@ -944,7 +1003,7 @@ static struct scsi_host_template hisi_sas_sht = {
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
- .slave_configure = sas_slave_configure,
+ .slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
@@ -977,9 +1036,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
- int i, s;
struct platform_device *pdev = hisi_hba->pdev;
struct device *dev = &pdev->dev;
+ int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
spin_lock_init(&hisi_hba->lock);
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1039,13 +1098,13 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
memset(hisi_hba->itct, 0, s);
- hisi_hba->slot_info = devm_kcalloc(dev, HISI_SAS_COMMAND_ENTRIES,
+ hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
sizeof(struct hisi_sas_slot),
GFP_KERNEL);
if (!hisi_hba->slot_info)
goto err_out;
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
+ s = max_command_entries * sizeof(struct hisi_sas_iost);
hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
GFP_KERNEL);
if (!hisi_hba->iost)
@@ -1053,7 +1112,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
memset(hisi_hba->iost, 0, s);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->breakpoint)
@@ -1061,7 +1120,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
memset(hisi_hba->breakpoint, 0, s);
- hisi_hba->slot_index_count = HISI_SAS_COMMAND_ENTRIES;
+ hisi_hba->slot_index_count = max_command_entries;
s = hisi_hba->slot_index_count / sizeof(unsigned long);
hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
@@ -1079,7 +1138,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
memset(hisi_hba->initial_fis, 0, s);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
@@ -1102,7 +1161,7 @@ err_out:
static void hisi_sas_free(struct hisi_hba *hisi_hba)
{
struct device *dev = &hisi_hba->pdev->dev;
- int i, s;
+ int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
for (i = 0; i < hisi_hba->queue_count; i++) {
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
@@ -1127,12 +1186,12 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
dma_free_coherent(dev, s,
hisi_hba->itct, hisi_hba->itct_dma);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
+ s = max_command_entries * sizeof(struct hisi_sas_iost);
if (hisi_hba->iost)
dma_free_coherent(dev, s,
hisi_hba->iost, hisi_hba->iost_dma);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
if (hisi_hba->breakpoint)
dma_free_coherent(dev, s,
hisi_hba->breakpoint,
@@ -1145,7 +1204,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
hisi_hba->initial_fis,
hisi_hba->initial_fis_dma);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
if (hisi_hba->sata_breakpoint)
dma_free_coherent(dev, s,
hisi_hba->sata_breakpoint,
@@ -1163,7 +1222,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
- struct property *sas_addr_prop;
shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
if (!shost)
@@ -1177,27 +1235,34 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
init_timer(&hisi_hba->timer);
- sas_addr_prop = of_find_property(np, "sas-addr", NULL);
- if (!sas_addr_prop || (sas_addr_prop->length != SAS_ADDR_SIZE))
+ if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
+ SAS_ADDR_SIZE))
goto err_out;
- memcpy(hisi_hba->sas_addr, sas_addr_prop->value, SAS_ADDR_SIZE);
- if (of_property_read_u32(np, "ctrl-reset-reg",
- &hisi_hba->ctrl_reset_reg))
- goto err_out;
+ if (np) {
+ hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
+ "hisilicon,sas-syscon");
+ if (IS_ERR(hisi_hba->ctrl))
+ goto err_out;
- if (of_property_read_u32(np, "ctrl-reset-sts-reg",
- &hisi_hba->ctrl_reset_sts_reg))
- goto err_out;
+ if (device_property_read_u32(dev, "ctrl-reset-reg",
+ &hisi_hba->ctrl_reset_reg))
+ goto err_out;
- if (of_property_read_u32(np, "ctrl-clock-ena-reg",
- &hisi_hba->ctrl_clock_ena_reg))
- goto err_out;
+ if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
+ &hisi_hba->ctrl_reset_sts_reg))
+ goto err_out;
- if (of_property_read_u32(np, "phy-count", &hisi_hba->n_phy))
+ if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
+ &hisi_hba->ctrl_clock_ena_reg))
+ goto err_out;
+ }
+
+ if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
goto err_out;
- if (of_property_read_u32(np, "queue-count", &hisi_hba->queue_count))
+ if (device_property_read_u32(dev, "queue-count",
+ &hisi_hba->queue_count))
goto err_out;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1205,11 +1270,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;
- hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(
- np, "hisilicon,sas-syscon");
- if (IS_ERR(hisi_hba->ctrl))
- goto err_out;
-
if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
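The rewrite above replaces the OF-only of_property_read_*() calls with the unified device property API, which resolves the same names from either a DT node or ACPI _DSD, so one probe path serves both firmware types. A minimal sketch of the idiom (property names taken from the code above; error handling illustrative):

#include <linux/property.h>
#include <scsi/sas.h>	/* SAS_ADDR_SIZE */

static int demo_read_props(struct device *dev, u8 *sas_addr, u32 *n_phy)
{
	/* works whether the values come from DT or ACPI */
	if (device_property_read_u8_array(dev, "sas-addr", sas_addr,
					  SAS_ADDR_SIZE))
		return -ENODEV;
	return device_property_read_u32(dev, "phy-count", n_phy);
}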
@@ -1277,8 +1337,8 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = HISI_SAS_COMMAND_ENTRIES;
- shost->cmd_per_lun = HISI_SAS_COMMAND_ENTRIES;
+ shost->can_queue = hisi_hba->hw->max_command_entries;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
sha->sas_ha_name = DRV_NAME;
sha->dev = &hisi_hba->pdev->dev;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index eea24d7531cf..1abbc2e162df 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -288,6 +288,20 @@ struct hisi_sas_complete_v1_hdr {
__le32 data;
};
+struct hisi_sas_err_record_v1 {
+ /* dw0 */
+ __le32 dma_err_type;
+
+ /* dw1 */
+ __le32 trans_tx_fail_type;
+
+ /* dw2 */
+ __le32 trans_rx_fail_type;
+
+ /* dw3 */
+ u32 rsvd;
+};
+
enum {
HISI_SAS_PHY_BCAST_ACK = 0,
HISI_SAS_PHY_SL_PHY_ENABLED,
@@ -392,6 +406,8 @@ enum {
TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */
};
+#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192
+
#define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS)
#define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES)
#define HISI_SAS_FATAL_INT_NR (2)
@@ -607,31 +623,42 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
return -EIO;
}
- /* Apply reset and disable clock */
- /* clk disable reg is offset by +4 bytes from clk enable reg */
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
- RESET_VALUE);
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
- RESET_VALUE);
- msleep(1);
- regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
- if (RESET_VALUE != (val & RESET_VALUE)) {
- dev_err(dev, "Reset failed\n");
- return -EIO;
- }
+ if (ACPI_HANDLE(dev)) {
+ acpi_status s;
- /* De-reset and enable clock */
- /* deassert rst reg is offset by +4 bytes from assert reg */
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
- RESET_VALUE);
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
- RESET_VALUE);
- msleep(1);
- regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
- if (val & RESET_VALUE) {
- dev_err(dev, "De-reset failed\n");
- return -EIO;
- }
+ s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(dev, "Reset failed\n");
+ return -EIO;
+ }
+ } else if (hisi_hba->ctrl) {
+ /* Apply reset and disable clock */
+ /* clk disable reg is offset by +4 bytes from clk enable reg */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
+ RESET_VALUE);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
+ RESET_VALUE);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+ if (RESET_VALUE != (val & RESET_VALUE)) {
+ dev_err(dev, "Reset failed\n");
+ return -EIO;
+ }
+
+ /* De-reset and enable clock */
+ /* deassert rst reg is offset by +4 bytes from assert reg */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
+ RESET_VALUE);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
+ RESET_VALUE);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+ if (val & RESET_VALUE) {
+ dev_err(dev, "De-reset failed\n");
+ return -EIO;
+ }
+ } else
+ dev_warn(dev, "no reset method\n");
return 0;
}
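On ACPI systems the controller reset above is delegated to firmware by evaluating the device's _RST control method instead of poking the syscon regmap. A minimal sketch of that branch in isolation (assuming the platform device has an ACPI companion exposing _RST):

#include <linux/acpi.h>

static int demo_acpi_reset(struct device *dev)
{
	acpi_handle handle = ACPI_HANDLE(dev);

	if (!handle)
		return -ENODEV;
	/* _RST takes no arguments and returns no value */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL)))
		return -EIO;
	return 0;
}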
@@ -1096,7 +1123,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct task_status_struct *ts = &task->task_status;
- struct hisi_sas_err_record *err_record = slot->status_buffer;
+ struct hisi_sas_err_record_v1 *err_record = slot->status_buffer;
struct device *dev = &hisi_hba->pdev->dev;
switch (task->task_proto) {
@@ -1185,6 +1212,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_NAK_R_ERR;
break;
}
+ case TRANS_TX_CREDIT_TIMEOUT_ERR:
+ case TRANS_TX_CLOSE_NORMAL_ERR:
+ {
+ /* This will request a retry */
+ ts->stat = SAS_QUEUE_FULL;
+ slot->abort = 1;
+ break;
+ }
default:
{
ts->stat = SAM_STAT_CHECK_CONDITION;
@@ -1220,7 +1255,6 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
struct domain_device *device;
enum exec_status sts;
struct hisi_sas_complete_v1_hdr *complete_queue =
- (struct hisi_sas_complete_v1_hdr *)
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v1_hdr *complete_hdr;
u32 cmplt_hdr_data;
@@ -1293,6 +1327,11 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
slot_err_v1_hw(hisi_hba, task, slot);
+ if (unlikely(slot->abort)) {
+ queue_work(hisi_hba->wq, &slot->abort_slot);
+ /* immediately return and do not complete */
+ return ts->stat;
+ }
goto out;
}
@@ -1796,6 +1835,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
+ .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
};
@@ -1815,12 +1855,20 @@ static const struct of_device_id sas_v1_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sas_v1_of_match);
+static const struct acpi_device_id sas_v1_acpi_match[] = {
+ { "HISI0161", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match);
+
static struct platform_driver hisi_sas_v1_driver = {
.probe = hisi_sas_v1_probe,
.remove = hisi_sas_v1_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v1_of_match,
+ .acpi_match_table = ACPI_PTR(sas_v1_acpi_match),
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
new file mode 100644
index 000000000000..b7337476454b
--- /dev/null
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -0,0 +1,2214 @@
+/*
+ * Copyright (c) 2016 Linaro Ltd.
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include "hisi_sas.h"
+#define DRV_NAME "hisi_sas_v2_hw"
+
+/* global registers that need init */
+#define DLVRY_QUEUE_ENABLE 0x0
+#define IOST_BASE_ADDR_LO 0x8
+#define IOST_BASE_ADDR_HI 0xc
+#define ITCT_BASE_ADDR_LO 0x10
+#define ITCT_BASE_ADDR_HI 0x14
+#define IO_BROKEN_MSG_ADDR_LO 0x18
+#define IO_BROKEN_MSG_ADDR_HI 0x1c
+#define PHY_CONTEXT 0x20
+#define PHY_STATE 0x24
+#define PHY_PORT_NUM_MA 0x28
+#define PORT_STATE 0x2c
+#define PORT_STATE_PHY8_PORT_NUM_OFF 16
+#define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
+#define PORT_STATE_PHY8_CONN_RATE_OFF 20
+#define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
+#define PHY_CONN_RATE 0x30
+#define HGC_TRANS_TASK_CNT_LIMIT 0x38
+#define AXI_AHB_CLK_CFG 0x3c
+#define ITCT_CLR 0x44
+#define ITCT_CLR_EN_OFF 16
+#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
+#define ITCT_DEV_OFF 0
+#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
+#define AXI_USER1 0x48
+#define AXI_USER2 0x4c
+#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
+#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
+#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
+#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
+#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
+#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
+#define HGC_GET_ITV_TIME 0x90
+#define DEVICE_MSG_WORK_MODE 0x94
+#define OPENA_WT_CONTI_TIME 0x9c
+#define I_T_NEXUS_LOSS_TIME 0xa0
+#define MAX_CON_TIME_LIMIT_TIME 0xa4
+#define BUS_INACTIVE_LIMIT_TIME 0xa8
+#define REJECT_TO_OPEN_LIMIT_TIME 0xac
+#define CFG_AGING_TIME 0xbc
+#define HGC_DFX_CFG2 0xc0
+#define HGC_IOMB_PROC1_STATUS 0x104
+#define CFG_1US_TIMER_TRSH 0xcc
+#define HGC_INVLD_DQE_INFO 0x148
+#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
+#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
+#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
+#define INT_COAL_EN 0x19c
+#define OQ_INT_COAL_TIME 0x1a0
+#define OQ_INT_COAL_CNT 0x1a4
+#define ENT_INT_COAL_TIME 0x1a8
+#define ENT_INT_COAL_CNT 0x1ac
+#define OQ_INT_SRC 0x1b0
+#define OQ_INT_SRC_MSK 0x1b4
+#define ENT_INT_SRC1 0x1b8
+#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
+#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
+#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
+#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
+#define ENT_INT_SRC2 0x1bc
+#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_ITC_INT_OFF 15
+#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC_MSK1 0x1c4
+#define ENT_INT_SRC_MSK2 0x1c8
+#define ENT_INT_SRC_MSK3 0x1cc
+#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
+#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR_MSK 0x1ec
+#define HGC_ERR_STAT_EN 0x238
+#define DLVRY_Q_0_BASE_ADDR_LO 0x260
+#define DLVRY_Q_0_BASE_ADDR_HI 0x264
+#define DLVRY_Q_0_DEPTH 0x268
+#define DLVRY_Q_0_WR_PTR 0x26c
+#define DLVRY_Q_0_RD_PTR 0x270
+#define HYPER_STREAM_ID_EN_CFG 0xc80
+#define OQ0_INT_SRC_MSK 0xc90
+#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
+#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
+#define COMPL_Q_0_DEPTH 0x4e8
+#define COMPL_Q_0_WR_PTR 0x4ec
+#define COMPL_Q_0_RD_PTR 0x4f0
+
+/* phy registers that need init */
+#define PORT_BASE (0x2000)
+
+#define PHY_CFG (PORT_BASE + 0x0)
+#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
+#define PHY_CFG_ENA_OFF 0
+#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
+#define PHY_CFG_DC_OPT_OFF 2
+#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
+#define PROG_PHY_LINK_RATE_MAX_OFF 0
+#define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF)
+#define PHY_CTRL (PORT_BASE + 0x14)
+#define PHY_CTRL_RESET_OFF 0
+#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
+#define SAS_PHY_CTRL (PORT_BASE + 0x20)
+#define SL_CFG (PORT_BASE + 0x84)
+#define PHY_PCN (PORT_BASE + 0x44)
+#define SL_TOUT_CFG (PORT_BASE + 0x8c)
+#define SL_CONTROL (PORT_BASE + 0x94)
+#define SL_CONTROL_NOTIFY_EN_OFF 0
+#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
+#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
+#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
+#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
+#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
+#define TX_ID_DWORD4 (PORT_BASE + 0xaC)
+#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
+#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
+#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
+#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
+#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
+#define RX_IDAF_DWORD3 (PORT_BASE + 0xd0)
+#define RX_IDAF_DWORD4 (PORT_BASE + 0xd4)
+#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8)
+#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
+#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
+#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
+#define CHL_INT0 (PORT_BASE + 0x1b4)
+#define CHL_INT0_HOTPLUG_TOUT_OFF 0
+#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
+#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
+#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
+#define CHL_INT0_SL_PHY_ENABLE_OFF 2
+#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
+#define CHL_INT0_NOT_RDY_OFF 4
+#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
+#define CHL_INT0_PHY_RDY_OFF 5
+#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
+#define CHL_INT1 (PORT_BASE + 0x1b8)
+#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
+#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
+#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
+#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
+#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
+#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
+#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
+#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
+#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
+#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
+#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
+#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
+#define DMA_TX_STATUS (PORT_BASE + 0x2d0)
+#define DMA_TX_STATUS_BUSY_OFF 0
+#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
+#define DMA_RX_STATUS (PORT_BASE + 0x2e8)
+#define DMA_RX_STATUS_BUSY_OFF 0
+#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
+
+#define AXI_CFG (0x5100)
+#define AM_CFG_MAX_TRANS (0x5010)
+#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
+
+/* HW dma structures */
+/* Delivery queue header */
+/* dw0 */
+#define CMD_HDR_RESP_REPORT_OFF 5
+#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
+#define CMD_HDR_TLR_CTRL_OFF 6
+#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PORT_OFF 18
+#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
+#define CMD_HDR_PRIORITY_OFF 27
+#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
+#define CMD_HDR_CMD_OFF 29
+#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
+/* dw1 */
+#define CMD_HDR_DIR_OFF 5
+#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
+#define CMD_HDR_RESET_OFF 7
+#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
+#define CMD_HDR_VDTL_OFF 10
+#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
+#define CMD_HDR_FRAME_TYPE_OFF 11
+#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
+#define CMD_HDR_DEV_ID_OFF 16
+#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
+/* dw2 */
+#define CMD_HDR_CFL_OFF 0
+#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
+#define CMD_HDR_NCQ_TAG_OFF 10
+#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
+#define CMD_HDR_MRFL_OFF 15
+#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
+#define CMD_HDR_SG_MOD_OFF 24
+#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
+#define CMD_HDR_FIRST_BURST_OFF 26
+#define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_FIRST_BURST_OFF)
+/* dw3 */
+#define CMD_HDR_IPTT_OFF 0
+#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
+/* dw6 */
+#define CMD_HDR_DIF_SGL_LEN_OFF 0
+#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
+#define CMD_HDR_DATA_SGL_LEN_OFF 16
+#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+
+/* Completion header */
+/* dw0 */
+#define CMPLT_HDR_RSPNS_XFRD_OFF 10
+#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_ERX_OFF 12
+#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
+/* dw1 */
+#define CMPLT_HDR_IPTT_OFF 0
+#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
+#define CMPLT_HDR_DEV_ID_OFF 16
+#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
+
+/* ITCT header */
+/* qw0 */
+#define ITCT_HDR_DEV_TYPE_OFF 0
+#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
+#define ITCT_HDR_VALID_OFF 2
+#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
+#define ITCT_HDR_MCR_OFF 5
+#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
+#define ITCT_HDR_VLN_OFF 9
+#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
+#define ITCT_HDR_PORT_ID_OFF 28
+#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
+/* qw2 */
+#define ITCT_HDR_INLT_OFF 0
+#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
+#define ITCT_HDR_BITLT_OFF 16
+#define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF)
+#define ITCT_HDR_MCTLT_OFF 32
+#define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF)
+#define ITCT_HDR_RTOLT_OFF 48
+#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+
+struct hisi_sas_complete_v2_hdr {
+ __le32 dw0;
+ __le32 dw1;
+ __le32 act;
+ __le32 dw3;
+};
+
+struct hisi_sas_err_record_v2 {
+ /* dw0 */
+ __le32 trans_tx_fail_type;
+
+ /* dw1 */
+ __le32 trans_rx_fail_type;
+
+ /* dw2 */
+ __le16 dma_tx_err_type;
+ __le16 sipc_rx_err_type;
+
+ /* dw3 */
+ __le32 dma_rx_err_type;
+};
+
+enum {
+ HISI_SAS_PHY_PHY_UPDOWN,
+ HISI_SAS_PHY_CHNL_INT,
+ HISI_SAS_PHY_INT_NR
+};
+
+enum {
+ TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
+ TRANS_RX_FAIL_BASE = 0x100, /* dw1 */
+ DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */
+ SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16 */
+ DMA_RX_ERR_BASE = 0x400, /* dw3 */
+
+ /* trans tx */
+ TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
+ TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */
+ TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */
+ TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */
+ TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */
+ RESERVED0, /* 0x5 */
+ TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */
+ TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */
+ TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */
+ TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */
+ TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */
+ TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */
+ TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */
+ TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */
+ TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */
+ TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */
+ TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */
+ TRANS_TX_ERR_FRAME_TXED, /* 0x11 */
+ TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */
+ TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */
+ TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */
+ TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */
+ TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp */
+ TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */
+ TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */
+ TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */
+ TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp */
+ TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp */
+ /* IO_TX_ERR_WITH_R_ERR_RECEVIED, 0x1b for sata/stp */
+ TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */
+ /* IO_RX_ERR_WITH_SATA_DEVICE_LOST, 0x1c for sata/stp */
+ TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */
+ TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */
+ /* IO_TX_ERR_WITH_SYNC_RXD, 0x1e for sata/stp */
+ TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */
+
+ /* trans rx */
+ TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */
+ TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */
+ /* IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, 0x102 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */
+ /* IO_ERR_WITH_RXFIS_TX_SYNCP, 0x105 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp */
+ TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */
+ TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */
+ TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */
+ TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */
+ RESERVED1, /* 0x10b */
+ TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */
+ TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */
+ TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */
+ TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */
+ TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */
+ TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */
+ /* IO_RX_ERR_WITH_FIS_TOO_SHORT, 0x111 for sata/stp */
+ TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp */
+ /* IO_RX_ERR_WITH_FIS_TOO_LONG, 0x112 for sata/stp */
+ TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */
+ /* IO_RX_ERR_WITH_SATA_DEVICE_LOST, 0x113 for sata */
+ RESERVED2, /* 0x114 */
+ RESERVED3, /* 0x115 */
+ RESERVED4, /* 0x116 */
+ RESERVED5, /* 0x117 */
+ TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */
+ TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */
+ TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */
+ RESERVED6, /* 0x11b */
+ RESERVED7, /* 0x11c */
+ RESERVED8, /* 0x11d */
+ RESERVED9, /* 0x11e */
+ TRANS_RX_R_ERR, /* 0x11f */
+
+ /* dma tx */
+ DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */
+ DMA_TX_DIF_APP_ERR, /* 0x201 */
+ DMA_TX_DIF_RPP_ERR, /* 0x202 */
+ DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */
+ DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */
+ DMA_TX_UNEXP_XFER_ERR, /* 0x205 */
+ DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */
+ DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */
+ DMA_TX_XFER_OFFSET_ERR, /* 0x208 */
+ DMA_TX_RAM_ECC_ERR, /* 0x209 */
+ DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */
+
+ /* sipc rx */
+ SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */
+ SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */
+ SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */
+ SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */
+ SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */
+ SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */
+ SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */
+ SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */
+ SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */
+ SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */
+ SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */
+
+ /* dma rx */
+ DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */
+ DMA_RX_DIF_APP_ERR, /* 0x401 */
+ DMA_RX_DIF_RPP_ERR, /* 0x402 */
+ DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */
+ DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */
+ DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */
+ DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */
+ DMA_RX_DATA_OFFSET_ERR, /* 0x407 */
+ RESERVED10, /* 0x408 */
+ DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */
+ DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */
+ DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */
+ DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */
+ DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */
+ DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */
+ DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */
+ DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */
+ DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */
+ DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */
+ DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */
+ DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */
+ DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */
+ DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */
+ DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */
+ DMA_RX_RAM_ECC_ERR, /* 0x418 */
+ DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */
+};
+
+#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096
+
+#define DIR_NO_DATA 0
+#define DIR_TO_INI 1
+#define DIR_TO_DEVICE 2
+#define DIR_RESERVED 3
+
+#define SATA_PROTOCOL_NONDATA 0x1
+#define SATA_PROTOCOL_PIO 0x2
+#define SATA_PROTOCOL_DMA 0x4
+#define SATA_PROTOCOL_FPDMA 0x8
+#define SATA_PROTOCOL_ATAPI 0x10
+
+static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl(regs);
+}
+
+static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl_relaxed(regs);
+}
+
+static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ writel(val, regs);
+}
+
+static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
+ u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ writel(val, regs);
+}
+
+static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
+ int phy_no, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ return readl(regs);
+}
+
+static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_DC_OPT_MSK;
+ cfg |= 1 << PHY_CFG_DC_OPT_OFF;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct sas_identify_frame identify_frame;
+ u32 *identify_buffer;
+
+ memset(&identify_frame, 0, sizeof(identify_frame));
+ identify_frame.dev_type = SAS_END_DEVICE;
+ identify_frame.frame_type = 0;
+ identify_frame._un1 = 1;
+ identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
+ identify_frame.target_bits = SAS_PROTOCOL_NONE;
+ memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ identify_frame.phy_id = phy_no;
+ identify_buffer = (u32 *)(&identify_frame);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
+ __swab32(identify_buffer[0]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
+ identify_buffer[2]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
+ identify_buffer[1]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
+ identify_buffer[4]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
+ identify_buffer[3]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
+ __swab32(identify_buffer[5]));
+}
+
+static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ config_id_frame_v2_hw(hisi_hba, i);
+}
+
+static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ struct domain_device *device = sas_dev->sas_device;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u64 qw0, device_id = sas_dev->device_id;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
+ struct domain_device *parent_dev = device->parent;
+ struct hisi_sas_port *port = device->port->lldd_port;
+
+ memset(itct, 0, sizeof(*itct));
+
+ /* qw0 */
+ qw0 = 0;
+ switch (sas_dev->dev_type) {
+ case SAS_END_DEVICE:
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ case SAS_SATA_DEV:
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
+ else
+ qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ default:
+ dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
+ sas_dev->dev_type);
+ }
+
+ qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
+ (device->max_linkrate << ITCT_HDR_MCR_OFF) |
+ (1 << ITCT_HDR_VLN_OFF) |
+ (port->id << ITCT_HDR_PORT_ID_OFF));
+ itct->qw0 = cpu_to_le64(qw0);
+
+ /* qw1 */
+ memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = __swab64(itct->sas_addr);
+
+ /* qw2 */
+ itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
+ (0xff00ULL << ITCT_HDR_BITLT_OFF) |
+ (0xff00ULL << ITCT_HDR_MCTLT_OFF) |
+ (0xff00ULL << ITCT_HDR_RTOLT_OFF));
+}
+
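setup_itct_v2_hw() builds the ITCT qwords with the OFF/MSK pairs defined at the top of this file: shift a field to its offset to pack, mask and shift back to unpack. A standalone sketch of the idiom (demo macros and values, not the actual hardware layout):

#include <stdio.h>
#include <stdint.h>

#define DEMO_VALID_OFF	2
#define DEMO_VALID_MSK	(0x1 << DEMO_VALID_OFF)
#define DEMO_MCR_OFF	5
#define DEMO_MCR_MSK	(0xf << DEMO_MCR_OFF)

int main(void)
{
	uint64_t qw0 = 0;

	/* pack: shift each field to its offset and OR it in */
	qw0 |= (1 << DEMO_VALID_OFF) | (0xa << DEMO_MCR_OFF);

	/* unpack: mask the field, then shift it back down */
	printf("valid=%llu mcr=0x%llx\n",
	       (unsigned long long)((qw0 & DEMO_VALID_MSK) >> DEMO_VALID_OFF),
	       (unsigned long long)((qw0 & DEMO_MCR_MSK) >> DEMO_MCR_OFF));
	return 0;
}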
+static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ u64 qw0, dev_id = sas_dev->device_id;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
+ u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ int i;
+
+ /* clear the itct interrupt state */
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+
+ /* clear the itct int */
+ for (i = 0; i < 2; i++) {
+ /* clear the itct table */
+ reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+ hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
+
+ udelay(10);
+ reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
+ dev_dbg(dev, "got clear ITCT done interrupt\n");
+
+ /* invalidate the itct state */
+ qw0 = le64_to_cpu(itct->qw0) & ~(1ULL << ITCT_HDR_VALID_OFF);
+ itct->qw0 = cpu_to_le64(qw0);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+ hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
+ hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
+
+ /* clear the itct */
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ break;
+ }
+ }
+}
+
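free_device_v2_hw() above is a trigger-then-poll sequence: kick ITCT_CLR, delay, and watch a write-1-to-clear interrupt bit, bounded to two attempts. The same pattern in a generic, hedged sketch (demo_read()/demo_write() are hypothetical register accessors, not driver functions):

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

u32 demo_read(u32 reg);			/* hypothetical accessors */
void demo_write(u32 reg, u32 val);

static int demo_trigger_and_poll(u32 trigger_reg, u32 status_reg,
				 u32 done_bit, int tries)
{
	int i;

	demo_write(trigger_reg, 1);		/* kick the operation */
	for (i = 0; i < tries; i++) {
		udelay(10);
		if (demo_read(status_reg) & done_bit) {
			demo_write(status_reg, done_bit);	/* W1C ack */
			return 0;
		}
	}
	return -ETIMEDOUT;
}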
+static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int i, reset_val;
+ u32 val;
+ unsigned long end_time;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ /* The mask needs to be set depending on the number of phys */
+ if (hisi_hba->n_phy == 9)
+ reset_val = 0x1fffff;
+ else
+ reset_val = 0x7ffff;
+
+ /* Disable all of the DQ */
+ for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+
+ /* Disable all of the PHYs */
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG);
+
+ phy_cfg &= ~PHY_CTRL_RESET_MSK;
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg);
+ }
+ udelay(50);
+
+ /* Ensure DMA tx & rx idle */
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ u32 dma_tx_status, dma_rx_status;
+
+ end_time = jiffies + msecs_to_jiffies(1000);
+
+ while (1) {
+ dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
+ DMA_TX_STATUS);
+ dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
+ DMA_RX_STATUS);
+
+ if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
+ !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
+ break;
+
+ msleep(20);
+ if (time_after(jiffies, end_time))
+ return -EIO;
+ }
+ }
+
+ /* Ensure axi bus idle */
+ end_time = jiffies + msecs_to_jiffies(1000);
+ while (1) {
+ u32 axi_status =
+ hisi_sas_read32(hisi_hba, AXI_CFG);
+
+ if (axi_status == 0)
+ break;
+
+ msleep(20);
+ if (time_after(jiffies, end_time))
+ return -EIO;
+ }
+
+ /* reset and disable clock */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
+ reset_val);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
+ reset_val);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+ if (reset_val != (val & reset_val)) {
+ dev_err(dev, "SAS reset fail.\n");
+ return -EIO;
+ }
+
+ /* De-reset and enable clock */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
+ reset_val);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
+ reset_val);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
+ &val);
+ if (val & reset_val) {
+ dev_err(dev, "SAS de-reset fail.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct device_node *np = dev->of_node;
+ int i;
+
+ /* Global registers init */
+
+ /* Deal with am-max-transmissions quirk */
+ if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) {
+ hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
+ hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
+ 0x2020);
+ } /* Else, use defaults -> do nothing */
+
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+ (u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
+ hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
+ hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
+ hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
+ hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20);
+ hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
+ hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+
+ hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
+ hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
+ hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
+ hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
+ hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ }
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ /* Delivery queue */
+ hisi_sas_write32(hisi_hba,
+ DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+
+ /* Completion queue */
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+ }
+
+ /* itct */
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->itct_dma));
+
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->itct_dma));
+
+ /* iost */
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->iost_dma));
+
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->iost_dma));
+
+ /* breakpoint */
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->breakpoint_dma));
+
+ /* SATA broken msg */
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ /* SATA initial fis */
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
+ lower_32_bits(hisi_hba->initial_fis_dma));
+
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
+ upper_32_bits(hisi_hba->initial_fis_dma));
+}
+
+static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ int rc;
+
+ rc = reset_hw_v2_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ msleep(100);
+ init_reg_v2_hw(hisi_hba);
+
+ init_id_frame_v2_hw(hisi_hba);
+
+ return 0;
+}
+
+static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg |= PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ config_id_frame_v2_hw(hisi_hba, phy_no);
+ config_phy_opt_mode_v2_hw(hisi_hba, phy_no);
+ enable_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ disable_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ stop_phy_v2_hw(hisi_hba, phy_no);
+ msleep(100);
+ start_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void start_phys_v2_hw(unsigned long data)
+{
+ struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ start_phy_v2_hw(hisi_hba, i);
+}
+
+static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+ struct timer_list *timer = &hisi_hba->timer;
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
+ hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
+ }
+
+ setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
+ mod_timer(timer, jiffies + HZ);
+}
+
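phys_init_v2_hw() arms a one-shot timer so the phys are started a second after init completes, outside the init path itself. A minimal sketch of the same pre-4.15 timer idiom the code uses (names illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_timer_fn(unsigned long cookie)
{
	pr_info("deferred start, cookie=%lx\n", cookie);
}

static void demo_arm(struct timer_list *t, unsigned long cookie)
{
	setup_timer(t, demo_timer_fn, cookie);
	mod_timer(t, jiffies + HZ);	/* fire in one second */
}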
+static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 sl_control;
+
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+ msleep(1);
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+}
+
+static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
+{
+ int i, bitmap = 0;
+ u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+ for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++)
+ if (phy_state & 1 << i)
+ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ bitmap |= 1 << i;
+
+ if (hisi_hba->n_phy == 9) {
+ u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+ if (phy_state & 1 << 8)
+ if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+ PORT_STATE_PHY8_PORT_NUM_OFF) == port_id)
+ bitmap |= 1 << 8;
+ }
+
+ return bitmap;
+}
+
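A userspace walk-through of the bitmap computation above, with illustrative register snapshots: each 4-bit nibble of PHY_PORT_NUM_MA names the port a phy belongs to, and a phy contributes its bit only when it is both up and on the requested port.

#include <stdio.h>

int main(void)
{
	unsigned int phy_state = 0x0b;		/* phys 0, 1 and 3 are up */
	unsigned int phy_port_num_ma = 0x2002;	/* nibble i = port of phy i */
	int port_id = 2, i, bitmap = 0;

	for (i = 0; i < 8; i++)
		if ((phy_state & (1 << i)) &&
		    ((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
			bitmap |= 1 << i;

	/* phys 0 and 3 are up and on port 2 -> bitmap 0x9 */
	printf("wide port %d bitmap: 0x%x\n", port_id, bitmap);
	return 0;
}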
+/**
+ * This function allocates across all queues to load balance.
+ * Slots are allocated from queues in a round-robin fashion.
+ *
+ * The call path from this function up to writing the write queue
+ * pointer must be safe from interruption.
+ */
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 r, w;
+ int queue = hisi_hba->queue;
+
+ while (1) {
+ w = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_WR_PTR + (queue * 0x14));
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ queue = (queue + 1) % hisi_hba->queue_count;
+ if (queue == hisi_hba->queue) {
+ dev_warn(dev, "could not find free slot\n");
+ return -EAGAIN;
+ }
+ continue;
+ }
+ break;
+ }
+ hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+ *q = queue;
+ *s = w;
+ return 0;
+}
+
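The delivery queues are classic ring buffers: get_free_slot_v2_hw() treats a queue as full when the read pointer sits one slot ahead of the write pointer, sacrificing one slot to tell full apart from empty. A standalone demo of that check:

#include <stdio.h>

#define DEMO_SLOTS 512

static int demo_queue_full(unsigned int r, unsigned int w)
{
	/* full when advancing w would collide with r */
	return r == (w + 1) % DEMO_SLOTS;
}

int main(void)
{
	printf("%d\n", demo_queue_full(0, 511));	/* 1: full */
	printf("%d\n", demo_queue_full(5, 5));		/* 0: empty */
	return 0;
}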
+static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+ ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+}
+
+static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_cmd_hdr *hdr,
+ struct scatterlist *scatter,
+ int n_elem)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct scatterlist *sg;
+ int i;
+
+ if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+ dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ n_elem);
+ return -EINVAL;
+ }
+
+ slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
+ &slot->sge_page_dma);
+ if (!slot->sge_page)
+ return -ENOMEM;
+
+ for_each_sg(scatter, sg, n_elem, i) {
+ struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+
+ entry->addr = cpu_to_le64(sg_dma_address(sg));
+ entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
+ entry->data_len = cpu_to_le32(sg_dma_len(sg));
+ entry->data_off = 0;
+ }
+
+ hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+
+ hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+
+ return 0;
+}
+
+static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_port *port = slot->port;
+ struct scatterlist *sg_req, *sg_resp;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ dma_addr_t req_dma_addr;
+ unsigned int req_len, resp_len;
+ int elem, rc;
+
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ /* req */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+ req_dma_addr = sg_dma_address(sg_req);
+
+ /* resp */
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out_req;
+ }
+ resp_len = sg_dma_len(sg_resp);
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_resp;
+ }
+
+ /* create header */
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
+ (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
+ (2 << CMD_HDR_CMD_OFF)); /* smp */
+
+ /* map itct entry */
+ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
+ (1 << CMD_HDR_FRAME_TYPE_OFF) |
+ (DIR_NO_DATA << CMD_HDR_DIR_OFF));
+
+ /* dw2 */
+ hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
+ (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+ CMD_HDR_MRFL_OFF));
+
+ hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
+
+ hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
+ hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+ return 0;
+
+err_out_resp:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+err_out_req:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ return rc;
+}
+
+static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot, int is_tmf,
+ struct hisi_sas_tmf_task *tmf)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_port *port = slot->port;
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+ int has_data = 0, rc, priority = is_tmf;
+ u8 *buf_cmd;
+ u32 dw1 = 0, dw2 = 0;
+
+ hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
+ (2 << CMD_HDR_TLR_CTRL_OFF) |
+ (port->id << CMD_HDR_PORT_OFF) |
+ (priority << CMD_HDR_PRIORITY_OFF) |
+ (1 << CMD_HDR_CMD_OFF)); /* ssp */
+
+ dw1 = 1 << CMD_HDR_VDTL_OFF;
+ if (is_tmf) {
+ dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
+ } else {
+ dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
+ switch (scsi_cmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+ }
+
+ /* map itct entry */
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+ + 3) / 4) << CMD_HDR_CFL_OFF) |
+ ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
+ (2 << CMD_HDR_SG_MOD_OFF);
+ hdr->dw2 = cpu_to_le32(dw2);
+
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+ buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+
+ memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+ if (!is_tmf) {
+ buf_cmd[9] = task->ssp_task.task_attr |
+ (task->ssp_task.task_prio << 3);
+ memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
+ task->ssp_task.cmd->cmd_len);
+ } else {
+ buf_cmd[10] = tmf->tmf;
+ switch (tmf->tmf) {
+ case TMF_ABORT_TASK:
+ case TMF_QUERY_TASK:
+ buf_cmd[12] =
+ (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+ buf_cmd[13] =
+ tmf->tag_of_task_to_be_managed & 0xff;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
+ struct dev_to_host_fis *d2h = slot->status_buffer +
+ sizeof(struct hisi_sas_err_record);
+
+ resp->frame_len = sizeof(struct dev_to_host_fis);
+ memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
+
+ ts->buf_valid_size = sizeof(*resp);
+}
+
+/* by default, task resp is complete */
+static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
+ struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
+ u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type);
+ u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type);
+ u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
+ u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
+ u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
+ int error = -1;
+
+ if (dma_rx_err_type) {
+ error = ffs(dma_rx_err_type)
+ - 1 + DMA_RX_ERR_BASE;
+ } else if (sipc_rx_err_type) {
+ error = ffs(sipc_rx_err_type)
+ - 1 + SIPC_RX_ERR_BASE;
+ } else if (dma_tx_err_type) {
+ error = ffs(dma_tx_err_type)
+ - 1 + DMA_TX_ERR_BASE;
+ } else if (trans_rx_fail_type) {
+ error = ffs(trans_rx_fail_type)
+ - 1 + TRANS_RX_FAIL_BASE;
+ } else if (trans_tx_fail_type) {
+ error = ffs(trans_tx_fail_type)
+ - 1 + TRANS_TX_FAIL_BASE;
+ }
+
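The decode above flattens five per-word error registers into one error space: each word owns a 0x100-sized base and ffs() selects its lowest set bit, with later words (dma rx first) taking priority. A standalone illustration against one of the enum values defined earlier in this file:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define DMA_RX_ERR_BASE 0x400

int main(void)
{
	unsigned int dma_rx_err_type = 0x20;	/* bit 5 set */
	/* lowest set bit, offset into this word's error space */
	int error = ffs(dma_rx_err_type) - 1 + DMA_RX_ERR_BASE;

	printf("error = 0x%x\n", error);	/* 0x405: DMA_RX_DATA_LEN_OVERFLOW */
	return 0;
}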
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ {
+ switch (error) {
+ case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_NO_DEST;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
+ {
+ /* not sure */
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ }
+ case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
+ {
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
+ {
+ ts->stat = SAS_OPEN_TO;
+ break;
+ }
+ case DMA_RX_DATA_LEN_OVERFLOW:
+ {
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ break;
+ }
+ case DMA_RX_DATA_LEN_UNDERFLOW:
+ case SIPC_RX_DATA_UNDERFLOW_ERR:
+ {
+ ts->residual = trans_tx_fail_type;
+ ts->stat = SAS_DATA_UNDERRUN;
+ break;
+ }
+ case TRANS_TX_ERR_FRAME_TXED:
+ {
+ /* This will request a retry */
+ ts->stat = SAS_QUEUE_FULL;
+ slot->abort = 1;
+ break;
+ }
+ case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
+ case TRANS_TX_ERR_PHY_NOT_ENABLE:
+ case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
+ case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+ case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
+ case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_BREAK_REQUEST:
+ case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_TX_ERR_WITH_NAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
+ case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+ case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR:
+ case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
+ case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+ case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
+ case TRANS_RX_ERR_WITH_BREAK_REQUEST:
+ case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
+ case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_RX_ERR_WITH_DATA_LEN0:
+ case TRANS_RX_ERR_WITH_BAD_HASH:
+ case TRANS_RX_XRDY_WLEN_ZERO_ERR:
+ case TRANS_RX_SSP_FRM_LEN_ERR:
+ case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
+ case DMA_TX_UNEXP_XFER_ERR:
+ case DMA_TX_UNEXP_RETRANS_ERR:
+ case DMA_TX_XFER_LEN_OVERFLOW:
+ case DMA_TX_XFER_OFFSET_ERR:
+ case DMA_RX_DATA_OFFSET_ERR:
+ case DMA_RX_UNEXP_NORM_RESP_ERR:
+ case DMA_RX_UNEXP_RDFRAME_ERR:
+ case DMA_RX_UNKNOWN_FRM_ERR:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ break;
+ case SAS_PROTOCOL_SMP:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ {
+ switch (error) {
+ case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
+ case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+ case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
+ {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
+ case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
+ case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
+ case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+ case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
+ case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
+ case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
+ {
+ ts->stat = SAS_OPEN_TO;
+ break;
+ }
+ case DMA_RX_DATA_LEN_OVERFLOW:
+ {
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ }
+ case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
+ case TRANS_TX_ERR_PHY_NOT_ENABLE:
+ case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
+ case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+ case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
+ case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_BREAK_REQUEST:
+ case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_TX_ERR_WITH_NAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+ case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT:
+ case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
+ case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+ case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR:
+ case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR:
+ case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN:
+ case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP:
+ case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
+ case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_RX_ERR_WITH_DATA_LEN0:
+ case TRANS_RX_ERR_WITH_BAD_HASH:
+ case TRANS_RX_XRDY_WLEN_ZERO_ERR:
+ case TRANS_RX_SSP_FRM_LEN_ERR:
+ case SIPC_RX_FIS_STATUS_ERR_BIT_VLD:
+ case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR:
+ case SIPC_RX_FIS_STATUS_BSY_BIT_ERR:
+ case SIPC_RX_WRSETUP_LEN_ODD_ERR:
+ case SIPC_RX_WRSETUP_LEN_ZERO_ERR:
+ case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR:
+ case SIPC_RX_SATA_UNEXP_FIS_ERR:
+ case DMA_RX_SATA_FRAME_TYPE_ERR:
+ case DMA_RX_UNEXP_RDFRAME_ERR:
+ case DMA_RX_PIO_DATA_LEN_ERR:
+ case DMA_RX_RDSETUP_STATUS_ERR:
+ case DMA_RX_RDSETUP_STATUS_DRQ_ERR:
+ case DMA_RX_RDSETUP_STATUS_BSY_ERR:
+ case DMA_RX_RDSETUP_LEN_ODD_ERR:
+ case DMA_RX_RDSETUP_LEN_ZERO_ERR:
+ case DMA_RX_RDSETUP_LEN_OVER_ERR:
+ case DMA_RX_RDSETUP_OFFSET_ERR:
+ case DMA_RX_RDSETUP_ACTIVE_ERR:
+ case DMA_RX_RDSETUP_ESTATUS_ERR:
+ case DMA_RX_UNKNOWN_FRM_ERR:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ }
+ default:
+ {
+ ts->stat = SAS_PROTO_RESPONSE;
+ break;
+ }
+ }
+ sata_done_v2_hw(hisi_hba, task, slot);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
+ int abort)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_device *sas_dev;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct task_status_struct *ts;
+ struct domain_device *device;
+ enum exec_status sts;
+ struct hisi_sas_complete_v2_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v2_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+
+ if (unlikely(!task || !task->lldd_task || !task->dev))
+ return -EINVAL;
+
+ ts = &task->task_status;
+ device = task->dev;
+ sas_dev = device->lldd_dev;
+
+ task->task_state_flags &=
+ ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+
+ memset(ts, 0, sizeof(*ts));
+ ts->resp = SAS_TASK_COMPLETE;
+
+ if (unlikely(!sas_dev || abort)) {
+ if (!sas_dev)
+ dev_dbg(dev, "slot complete: port has not device\n");
+ ts->stat = SAS_PHY_DOWN;
+ goto out;
+ }
+
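+ /*
+  * An error was recorded but no response frame was transferred;
+  * decode the error record to work out the task status.
+  */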
+ if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
+ (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
+
+ slot_err_v2_hw(hisi_hba, task, slot);
+ if (unlikely(slot->abort)) {
+ queue_work(hisi_hba->wq, &slot->abort_slot);
+ /* immediately return and do not complete */
+ return ts->stat;
+ }
+ goto out;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ {
+ struct ssp_response_iu *iu = slot->status_buffer +
+ sizeof(struct hisi_sas_err_record);
+
+ sas_ssp_task_response(dev, task, iu);
+ break;
+ }
+ case SAS_PROTOCOL_SMP:
+ {
+ struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+ void *to;
+
+ ts->stat = SAM_STAT_GOOD;
+ to = kmap_atomic(sg_page(sg_resp));
+
+ dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ memcpy(to + sg_resp->offset,
+ slot->status_buffer +
+ sizeof(struct hisi_sas_err_record),
+ sg_dma_len(sg_resp));
+ kunmap_atomic(to);
+ break;
+ }
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ {
+ ts->stat = SAM_STAT_GOOD;
+ sata_done_v2_hw(hisi_hba, task, slot);
+ break;
+ }
+ default:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ }
+
+ if (!slot->port->port_attached) {
+ dev_err(dev, "slot complete: port %d has removed\n",
+ slot->port->sas_port.id);
+ ts->stat = SAS_PHY_DOWN;
+ }
+
+out:
+ if (sas_dev && sas_dev->running_req)
+ sas_dev->running_req--;
+
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ sts = ts->stat;
+
+ if (task->task_done)
+ task->task_done(task);
+
+ return sts;
+}
+
+static u8 get_ata_protocol(u8 cmd, int direction)
+{
+ switch (cmd) {
+ case ATA_CMD_FPDMA_WRITE:
+ case ATA_CMD_FPDMA_READ:
+ return SATA_PROTOCOL_FPDMA;
+
+ case ATA_CMD_ID_ATA:
+ case ATA_CMD_PMP_READ:
+ case ATA_CMD_READ_LOG_EXT:
+ case ATA_CMD_PIO_READ:
+ case ATA_CMD_PIO_READ_EXT:
+ case ATA_CMD_PMP_WRITE:
+ case ATA_CMD_WRITE_LOG_EXT:
+ case ATA_CMD_PIO_WRITE:
+ case ATA_CMD_PIO_WRITE_EXT:
+ return SATA_PROTOCOL_PIO;
+
+ case ATA_CMD_READ:
+ case ATA_CMD_READ_EXT:
+ case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_WRITE:
+ case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_QUEUED:
+ case ATA_CMD_WRITE_LOG_DMA_EXT:
+ return SATA_PROTOCOL_DMA;
+
+ case ATA_CMD_DOWNLOAD_MICRO:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_CHK_POWER:
+ case ATA_CMD_FLUSH:
+ case ATA_CMD_FLUSH_EXT:
+ case ATA_CMD_VERIFY:
+ case ATA_CMD_VERIFY_EXT:
+ case ATA_CMD_SET_FEATURES:
+ case ATA_CMD_STANDBY:
+ case ATA_CMD_STANDBYNOW1:
+ return SATA_PROTOCOL_NONDATA;
+ default:
+ if (direction == DMA_NONE)
+ return SATA_PROTOCOL_NONDATA;
+ return SATA_PROTOCOL_PIO;
+ }
+}
+
+static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct domain_device *device = task->dev;
+ struct domain_device *parent_dev = device->parent;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct hisi_sas_port *port = device->port->lldd_port;
+ u8 *buf_cmd;
+ int has_data = 0, rc = 0;
+ u32 dw1 = 0, dw2 = 0, hdr_tag = 0;
+
+ /* create header */
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+ else
+ hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+
+ /* dw1 */
+ switch (task->data_dir) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+
+ if (task->ata_task.fis.command == 0)
+ dw1 |= 1 << CMD_HDR_RESET_OFF;
+
+ dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
+ << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ /* dw2 */
+ if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
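+ /* For NCQ, the tag is also carried in bits 7:3 of the FIS sector count */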
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
+ }
+
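+ /* The command frame length (CFL) is in dwords, hence the divide by 4 */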
+ dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
+ 2 << CMD_HDR_SG_MOD_OFF;
+ hdr->dw2 = cpu_to_le32(dw2);
+
+ /* dw3 */
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+ buf_cmd = slot->command_table;
+
+ if (likely(!task->ata_task.device_control_reg_update))
+ task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+ /* fill in command FIS */
+ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+
+ return 0;
+}
+
+static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int i, res = IRQ_HANDLED;
+ u32 context, port_id, link_rate, hard_phy_linkrate;
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
+ struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
+
+ /* Check for SATA dev */
+ context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
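+ /* SATA link-up is reported through sata_int_v2_hw() instead */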
+ if (context & (1 << phy_no))
+ goto end;
+
+ if (phy_no == 8) {
+ u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+ port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+ PORT_STATE_PHY8_PORT_NUM_OFF;
+ link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
+ PORT_STATE_PHY8_CONN_RATE_OFF;
+ } else {
+ port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ port_id = (port_id >> (4 * phy_no)) & 0xf;
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ }
+
+ if (port_id == 0xf) {
+ dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
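+ /* Read the identify address frame, one byte-swapped dword at a time */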
+ for (i = 0; i < 6; i++) {
+ u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
+ RX_IDAF_DWORD0 + (i * 4));
+ frame_rcvd[i] = __swab32(idaf);
+ }
+
+ /* Get the linkrates */
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ sas_phy->linkrate = link_rate;
+ hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ HARD_PHY_LINKRATE);
+ phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+ phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+
+ sas_phy->oob_mode = SAS_OOB_MODE;
+ memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE);
+ dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ phy->port_id = port_id;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ phy->phy_type |= PORT_TYPE_SAS;
+ phy->phy_attached = 1;
+ phy->identify.device_type = id->dev_type;
+ phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
+ if (phy->identify.device_type == SAS_END_DEVICE)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SMP;
+ queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_PHY_ENABLE_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
+
+ return res;
+}
+
+static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int res = IRQ_HANDLED;
+ u32 phy_cfg, phy_state;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
+
+ phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
+
+ return res;
+}
+
+static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_msk;
+ int phy_no = 0;
+ irqreturn_t res = IRQ_HANDLED;
+
+ irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
+ >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
+ while (irq_msk) {
+ if (irq_msk & 1) {
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+
+ if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
+ /* phy up */
+ if (phy_up_v2_hw(phy_no, hisi_hba) == IRQ_NONE) {
+ res = IRQ_NONE;
+ goto end;
+ }
+
+ if (irq_value & CHL_INT0_NOT_RDY_MSK)
+ /* phy down */
+ if (phy_down_v2_hw(phy_no, hisi_hba) == IRQ_NONE) {
+ res = IRQ_NONE;
+ goto end;
+ }
+ }
+ irq_msk >>= 1;
+ phy_no++;
+ }
+
+end:
+ return res;
+}
+
+static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ unsigned long flags;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_RX_BCST_ACK_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+}
+
+static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 ent_msk, ent_tmp, irq_msk;
+ int phy_no = 0;
+
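+ /*
+  * Mask this channel interrupt while it is being serviced;
+  * the saved mask is restored before returning.
+  */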
+ ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ ent_tmp = ent_msk;
+ ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
+
+ irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >>
+ HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
+
+ while (irq_msk) {
+ if (irq_msk & (1 << phy_no)) {
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if (irq_value1) {
+ if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
+ CHL_INT1_DMAC_TX_ECC_ERR_MSK))
+ panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
+ dev_name(dev), irq_value1);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
+
+ if (irq_value2)
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+
+ if (irq_value0) {
+ if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ phy_bcast_v2_hw(phy_no, hisi_hba);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
+ }
+ }
+ irq_msk &= ~(1 << phy_no);
+ phy_no++;
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+{
+ struct hisi_sas_cq *cq = p;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct hisi_sas_itct *itct;
+ struct hisi_sas_complete_v2_hdr *complete_queue;
+ u32 irq_value, rd_point, wr_point, dev_id;
+ int queue = cq->id;
+
+ complete_queue = hisi_hba->complete_hdr[queue];
+ irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
+
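+ /* Acknowledge the interrupt for this completion queue */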
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+ rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
+ (0x14 * queue));
+ wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
+ (0x14 * queue));
+
+ while (rd_point != wr_point) {
+ struct hisi_sas_complete_v2_hdr *complete_hdr;
+ int iptt;
+
+ complete_hdr = &complete_queue[rd_point];
+
+ /* Check for NCQ completion */
+ if (complete_hdr->act) {
+ u32 act_tmp = complete_hdr->act;
+ int ncq_tag_count = ffs(act_tmp);
+
+ dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
+ CMPLT_HDR_DEV_ID_OFF;
+ itct = &hisi_hba->itct[dev_id];
+
+ /* The NCQ tags are held in the itct header */
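+ /* Each 64-bit qword of qw4_15 packs five 12-bit IPTT values */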
+ while (ncq_tag_count) {
+ __le64 *ncq_tag = &itct->qw4_15[0];
+
+ ncq_tag_count -= 1;
+ iptt = (ncq_tag[ncq_tag_count / 5]
+ >> (ncq_tag_count % 5) * 12) & 0xfff;
+
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v2_hw(hisi_hba, slot, 0);
+
+ act_tmp &= ~(1 << ncq_tag_count);
+ ncq_tag_count = ffs(act_tmp);
+ }
+ } else {
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v2_hw(hisi_hba, slot, 0);
+ }
+
+ if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
+ rd_point = 0;
+ }
+
+ /* update rd_point */
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_sas_phy *phy = p;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_initial_fis *initial_fis;
+ struct dev_to_host_fis *fis;
+ u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
+ irqreturn_t res = IRQ_HANDLED;
+ u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+ int phy_no;
+
+ phy_no = sas_phy->id;
+ initial_fis = &hisi_hba->initial_fis[phy_no];
+ fis = &initial_fis->fis;
+
+ ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no);
+
+ ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1);
+ ent_tmp = ent_int;
+ ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
+ if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
+ dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+ res = IRQ_NONE;
+ goto end;
+ }
+
+ if (unlikely(phy_no == 8)) {
+ u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+ port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+ PORT_STATE_PHY8_PORT_NUM_OFF;
+ link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
+ PORT_STATE_PHY8_CONN_RATE_OFF;
+ } else {
+ port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ port_id = (port_id >> (4 * phy_no)) & 0xf;
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ }
+
+ if (port_id == 0xf) {
+ dev_err(dev, "sata int: phy%d invalid portid\n", phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
+ sas_phy->linkrate = link_rate;
+ hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ HARD_PHY_LINKRATE);
+ phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+ phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+
+ sas_phy->oob_mode = SATA_OOB_MODE;
+ /* Make up some unique SAS address */
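+ /* SATA devices have none of their own, but libsas expects one per phy */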
+ attached_sas_addr[0] = 0x50;
+ attached_sas_addr[7] = phy_no;
+ memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
+ memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
+ dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ phy->port_id = port_id;
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->phy_attached = 1;
+ phy->identify.device_type = SAS_SATA_DEV;
+ phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+
+ return res;
+}
+
+static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
+ int_phy_updown_v2_hw,
+ int_chnl_int_v2_hw,
+};
+
+/*
+ * There is a limitation in the hip06 chipset: all mbigen
+ * interrupts must be mapped in, even if they are not used.
+ */
+static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->pdev;
+ struct device *dev = &pdev->dev;
+ int i, irq, rc, irq_map[128];
+
+ for (i = 0; i < 128; i++)
+ irq_map[i] = platform_get_irq(pdev, i);
+
+ for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
+ int idx = i;
+
+ irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
+ if (irq < 0) {
+ dev_err(dev, "irq init: failed to map phy interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
+ DRV_NAME " phy", hisi_hba);
+ if (rc) {
+ dev_err(dev, "irq init: could not request "
+ "phy interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+ int idx = i + 72; /* First SATA interrupt is irq72 */
+
+ irq = irq_map[idx];
+ if (irq < 0) {
+ dev_err(dev, "irq init: failed to map sata interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
+ DRV_NAME " sata", phy);
+ if (rc) {
+ dev_err(dev, "irq init: could not request "
+ "sata interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ int idx = i + 96; /* First cq interrupt is irq96 */
+
+ irq = irq_map[idx];
+ if (irq < 0) {
+ dev_err(dev,
+ "irq init: could not map cq interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+ rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
+ DRV_NAME " cq", &hisi_hba->cq[i]);
+ if (rc) {
+ dev_err(dev,
+ "irq init: could not request cq interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
+{
+ int rc;
+
+ rc = hw_init_v2_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ rc = interrupt_init_v2_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ phys_init_v2_hw(hisi_hba);
+
+ return 0;
+}
+
+static const struct hisi_sas_hw hisi_sas_v2_hw = {
+ .hw_init = hisi_sas_v2_init,
+ .setup_itct = setup_itct_v2_hw,
+ .sl_notify = sl_notify_v2_hw,
+ .get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+ .free_device = free_device_v2_hw,
+ .prep_smp = prep_smp_v2_hw,
+ .prep_ssp = prep_ssp_v2_hw,
+ .prep_stp = prep_ata_v2_hw,
+ .get_free_slot = get_free_slot_v2_hw,
+ .start_delivery = start_delivery_v2_hw,
+ .slot_complete = slot_complete_v2_hw,
+ .phy_enable = enable_phy_v2_hw,
+ .phy_disable = disable_phy_v2_hw,
+ .phy_hard_reset = phy_hard_reset_v2_hw,
+ .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
+ .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
+};
+
+static int hisi_sas_v2_probe(struct platform_device *pdev)
+{
+ return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
+}
+
+static int hisi_sas_v2_remove(struct platform_device *pdev)
+{
+ return hisi_sas_remove(pdev);
+}
+
+static const struct of_device_id sas_v2_of_match[] = {
+ { .compatible = "hisilicon,hip06-sas-v2",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sas_v2_of_match);
+
+static struct platform_driver hisi_sas_v2_driver = {
+ .probe = hisi_sas_v2_probe,
+ .remove = hisi_sas_v2_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = sas_v2_of_match,
+ },
+};
+
+module_platform_driver(hisi_sas_v2_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 82ac1cd818ac..1547bd93c70b 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -33,7 +33,7 @@
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
+#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -42,7 +42,7 @@
#include "scsi_logging.h"
-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
+static DEFINE_IDA(host_index_ida);
static void scsi_host_cls_release(struct device *dev)
@@ -250,6 +250,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto out_destroy_freelist;
+ /*
+ * Increase usage count temporarily here so that calling
+ * scsi_autopm_put_host() will trigger runtime idle if there is
+ * nothing else preventing suspending the device.
+ */
+ pm_runtime_get_noresume(&shost->shost_gendev);
pm_runtime_set_active(&shost->shost_gendev);
pm_runtime_enable(&shost->shost_gendev);
device_enable_async_suspend(&shost->shost_gendev);
@@ -290,6 +296,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto out_destroy_host;
scsi_proc_host_add(shost);
+ scsi_autopm_put_host(shost);
return error;
out_destroy_host:
@@ -355,6 +362,8 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost->shost_data);
+ ida_simple_remove(&host_index_ida, shost->host_no);
+
if (parent)
put_device(parent);
kfree(shost);
@@ -388,6 +397,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
gfp_t gfp_mask = GFP_KERNEL;
+ int index;
if (sht->unchecked_isa_dma && privsize)
gfp_mask |= __GFP_DMA;
@@ -406,11 +416,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- /*
- * subtract one because we increment first then return, but we need to
- * know what the next host number was before increment
- */
- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
+ index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ if (index < 0)
+ goto fail_kfree;
+ shost->host_no = index;
+
shost->dma_channel = 0xff;
/* These three are default values which can be overridden */
@@ -495,7 +505,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
- goto fail_kfree;
+ goto fail_index_remove;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -511,6 +521,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
fail_kthread:
kthread_stop(shost->ehandler);
+ fail_index_remove:
+ ida_simple_remove(&host_index_ida, shost->host_no);
fail_kfree:
kfree(shost);
return NULL;
@@ -606,6 +618,7 @@ int scsi_init_hosts(void)
void scsi_exit_hosts(void)
{
class_unregister(&shost_class);
+ ida_destroy(&host_index_ida);
}
int scsi_is_host_device(const struct device *dev)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 38ce0e308fbe..5be944c8b71c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
@@ -12,7 +13,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * Questions/Comments/Bugfixes to storagedev@pmcs.com
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
*
*/
@@ -809,7 +810,8 @@ static ssize_t path_info_show(struct device *dev,
PAGE_SIZE - output_len,
"PORT: %.2s ",
phys_connector);
- if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
+ if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
+ hdev->expose_device) {
if (box == 0 || box == 0xFF) {
output_len += scnprintf(buf + output_len,
PAGE_SIZE - output_len,
@@ -1166,6 +1168,7 @@ static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
snprintf(label, LABEL_SIZE, "enclosure");
break;
case TYPE_DISK:
+ case TYPE_ZBC:
if (dev->external)
snprintf(label, LABEL_SIZE, "external");
else if (!is_logical_dev_addr_mode(dev->scsi3addr))
@@ -1636,6 +1639,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
continue;
- if (dev[j]->devtype != TYPE_DISK)
+ if (dev[j]->devtype != TYPE_DISK &&
+ dev[j]->devtype != TYPE_ZBC)
continue;
if (is_logical_device(dev[j]))
continue;
if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
@@ -1681,6 +1686,8 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
continue;
- if (dev[i]->devtype != TYPE_DISK)
+ if (dev[i]->devtype != TYPE_DISK &&
+ dev[i]->devtype != TYPE_ZBC)
continue;
if (!is_logical_device(dev[i]))
continue;
@@ -3208,8 +3215,10 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
- if (bmic_device_index == 0xFF00)
+ if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
+ rc = IO_OK;
goto out;
+ }
bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
if (!bssbp)
@@ -3657,18 +3666,6 @@ static int hpsa_device_supports_aborts(struct ctlr_info *h,
return rc;
}
-static void sanitize_inquiry_string(unsigned char *s, int len)
-{
- bool terminated = false;
-
- for (; len > 0; (--len, ++s)) {
- if (*s == 0)
- terminated = true;
- if (terminated || *s < 0x20 || *s > 0x7e)
- *s = ' ';
- }
-}
-
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -3699,8 +3696,8 @@ static int hpsa_update_device_info(struct ctlr_info *h,
goto bail_out;
}
- sanitize_inquiry_string(&inq_buff[8], 8);
- sanitize_inquiry_string(&inq_buff[16], 16);
+ scsi_sanitize_inquiry_string(&inq_buff[8], 8);
+ scsi_sanitize_inquiry_string(&inq_buff[16], 16);
this_device->devtype = (inq_buff[0] & 0x1f);
memcpy(this_device->scsi3addr, scsi3addr, 8);
@@ -3713,7 +3710,8 @@ static int hpsa_update_device_info(struct ctlr_info *h,
hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
sizeof(this_device->device_id));
- if (this_device->devtype == TYPE_DISK &&
+ if ((this_device->devtype == TYPE_DISK ||
+ this_device->devtype == TYPE_ZBC) &&
is_logical_dev_addr_mode(scsi3addr)) {
int volume_offline;
@@ -4181,6 +4179,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
ncurrent++;
break;
case TYPE_DISK:
+ case TYPE_ZBC:
if (this_device->physical_device) {
/* The disk is in HBA mode. */
/* Never use RAID mapper in HBA mode. */
@@ -4197,7 +4196,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
ncurrent++;
break;
case TYPE_ENCLOSURE:
- hpsa_get_enclosure_info(h, lunaddrbytes,
+ if (!this_device->external)
+ hpsa_get_enclosure_info(h, lunaddrbytes,
physdev_list, phys_dev_index,
this_device);
ncurrent++;
@@ -4970,6 +4970,8 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
return IO_ACCEL_INELIGIBLE;
c->phys_disk = dev->phys_disk[map_index];
+ if (!c->phys_disk)
+ return IO_ACCEL_INELIGIBLE;
disk_handle = dd[map_index].ioaccel_handle;
disk_block = le64_to_cpu(map->disk_starting_blk) +
@@ -5835,7 +5837,7 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
}
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
- unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
+ struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
{
/*
* ioccelerator mode 2 commands should be aborted via the
@@ -5844,14 +5846,16 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
* Change abort to physical device reset when abort TMF is unsupported.
*/
if (abort->cmd_type == CMD_IOACCEL2) {
- if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
+ if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
+ dev->physical_device)
return hpsa_send_abort_ioaccel2(h, abort,
reply_queue);
else
- return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
+ return hpsa_send_reset_as_abort_ioaccel2(h,
+ dev->scsi3addr,
abort, reply_queue);
}
- return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
+ return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
}
/* Find out which reply queue a command was meant to return on */
@@ -5989,7 +5993,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
cmd_free(h, abort);
return FAILED;
}
- rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
+ rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
atomic_inc(&h->abort_cmds_available);
wake_up_all(&h->abort_cmd_wait_queue);
if (rc != 0) {
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index fdd39fc0b199..d06bb7417e36 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
@@ -12,7 +13,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * Questions/Comments/Bugfixes to storagedev@pmcs.com
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
*
*/
#ifndef HPSA_H
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 6a919ada96b3..a5be153d92d4 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
@@ -12,7 +13,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * Questions/Comments/Bugfixes to storagedev@pmcs.com
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
*
*/
#ifndef HPSA_CMD_H
@@ -289,7 +290,7 @@ struct SenseSubsystem_info {
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_SET_DIAG_OPTIONS 0xF4
#define BMIC_SENSE_DIAG_OPTIONS 0xF5
-#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x40000000
+#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x80000000
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
#define BMIC_SENSE_STORAGE_BOX_PARAMS 0x65
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 6aa317c303e2..fc523c3e5019 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2636,7 +2636,8 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
struct ibmvfc_target *tgt;
ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
- " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
+ " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
+ be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
ibmvfc_get_link_state(crq->link_state));
switch (be64_to_cpu(crq->event)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index adfef9db6f1e..d9534ee6ef52 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -182,7 +182,7 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
spin_lock_irqsave(&queue->lock, flags);
crq = &queue->msgs[queue->cur];
- if (crq->valid & 0x80) {
+ if (crq->valid != VIOSRP_CRQ_FREE) {
if (++queue->cur == queue->size)
queue->cur = 0;
@@ -231,7 +231,7 @@ static void ibmvscsi_task(void *data)
/* Pull all the valid messages off the CRQ */
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
- crq->valid = 0x00;
+ crq->valid = VIOSRP_CRQ_FREE;
}
vio_enable_interrupts(vdev);
@@ -239,7 +239,7 @@ static void ibmvscsi_task(void *data)
if (crq != NULL) {
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
- crq->valid = 0x00;
+ crq->valid = VIOSRP_CRQ_FREE;
} else {
done = 1;
}
@@ -248,25 +248,23 @@ static void ibmvscsi_task(void *data)
static void gather_partition_info(void)
{
- struct device_node *rootdn;
-
const char *ppartition_name;
const __be32 *p_number_ptr;
/* Retrieve information about this partition */
- rootdn = of_find_node_by_path("/");
- if (!rootdn) {
+ if (!of_root)
return;
- }
- ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ of_node_get(of_root);
+
+ ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
strncpy(partition_name, ppartition_name,
sizeof(partition_name));
- p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
+ p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)
partition_number = of_read_number(p_number_ptr, 1);
- of_node_put(rootdn);
+ of_node_put(of_root);
}
static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
@@ -283,8 +281,8 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
hostdata->madapter_info.partition_number =
cpu_to_be32(partition_number);
- hostdata->madapter_info.mad_version = cpu_to_be32(1);
- hostdata->madapter_info.os_type = cpu_to_be32(2);
+ hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1);
+ hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX);
}
/**
@@ -316,7 +314,7 @@ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
- if (rc == 2) {
+ if (rc == H_CLOSED) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
} else if (rc != 0) {
@@ -366,7 +364,7 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = ibmvscsi_reset_crq_queue(queue,
hostdata);
- if (rc == 2) {
+ if (rc == H_CLOSED) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
retrc = 0;
@@ -474,7 +472,7 @@ static int initialize_event_pool(struct event_pool *pool,
struct srp_event_struct *evt = &pool->events[i];
memset(&evt->crq, 0x00, sizeof(evt->crq));
atomic_set(&evt->free, 1);
- evt->crq.valid = 0x80;
+ evt->crq.valid = VIOSRP_CRQ_CMD_RSP;
evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
sizeof(*evt->xfer_iu) * i);
@@ -1398,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
hostdata->host->max_sectors =
be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
- if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
hostdata->madapter_info.srp_version);
@@ -1407,7 +1405,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
- if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) {
enable_fast_fail(hostdata);
return;
}
@@ -1767,9 +1765,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct srp_event_struct *evt_struct =
(__force struct srp_event_struct *)crq->IU_data_ptr;
switch (crq->valid) {
- case 0xC0: /* initialization */
+ case VIOSRP_CRQ_INIT_RSP: /* initialization */
switch (crq->format) {
- case 0x01: /* Initialization message */
+ case VIOSRP_CRQ_INIT: /* Initialization message */
dev_info(hostdata->dev, "partner initialized\n");
/* Send back a response */
rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
@@ -1781,7 +1779,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
break;
- case 0x02: /* Initialization response */
+ case VIOSRP_CRQ_INIT_COMPLETE: /* Initialization response */
dev_info(hostdata->dev, "partner initialization complete\n");
/* Now login */
@@ -1791,7 +1789,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
}
return;
- case 0xFF: /* Hypervisor telling us the connection is closed */
+ case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */
scsi_block_requests(hostdata->host);
atomic_set(&hostdata->request_limit, 0);
if (crq->format == 0x06) {
@@ -1807,7 +1805,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
ibmvscsi_reset_host(hostdata);
}
return;
- case 0x80: /* real payload */
+ case VIOSRP_CRQ_CMD_RSP: /* real payload */
break;
default:
dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
@@ -1855,62 +1853,6 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
/**
- * ibmvscsi_get_host_config: Send the command to the server to get host
- * configuration data. The data is opaque to us.
- */
-static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
- unsigned char *buffer, int length)
-{
- struct viosrp_host_config *host_config;
- struct srp_event_struct *evt_struct;
- unsigned long flags;
- dma_addr_t addr;
- int rc;
-
- evt_struct = get_event_struct(&hostdata->pool);
- if (!evt_struct) {
- dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
- return -1;
- }
-
- init_event_struct(evt_struct,
- sync_completion,
- VIOSRP_MAD_FORMAT,
- info_timeout);
-
- host_config = &evt_struct->iu.mad.host_config;
-
- /* The transport length field is only 16-bit */
- length = min(0xffff, length);
-
- /* Set up a lun reset SRP command */
- memset(host_config, 0x00, sizeof(*host_config));
- host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
- host_config->common.length = cpu_to_be16(length);
- addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(hostdata->dev, addr)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(hostdata->dev,
- "dma_mapping error getting host config\n");
- free_event_struct(&hostdata->pool, evt_struct);
- return -1;
- }
-
- host_config->buffer = cpu_to_be64(addr);
-
- init_completion(&evt_struct->comp);
- spin_lock_irqsave(hostdata->host->host_lock, flags);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- if (rc == 0)
- wait_for_completion(&evt_struct->comp);
- dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
-
- return rc;
-}
-
-/**
* ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
* @sdev: struct scsi_device device to configure
*
@@ -2041,7 +1983,7 @@ static ssize_t show_host_partition_number(struct device *dev,
int len;
len = snprintf(buf, PAGE_SIZE, "%d\n",
- hostdata->madapter_info.partition_number);
+ be32_to_cpu(hostdata->madapter_info.partition_number));
return len;
}
@@ -2061,7 +2003,7 @@ static ssize_t show_host_mad_version(struct device *dev,
int len;
len = snprintf(buf, PAGE_SIZE, "%d\n",
- hostdata->madapter_info.mad_version);
+ be32_to_cpu(hostdata->madapter_info.mad_version));
return len;
}
@@ -2080,7 +2022,8 @@ static ssize_t show_host_os_type(struct device *dev,
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
- len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
+ len = snprintf(buf, PAGE_SIZE, "%d\n",
+ be32_to_cpu(hostdata->madapter_info.os_type));
return len;
}
@@ -2095,21 +2038,14 @@ static struct device_attribute ibmvscsi_host_os_type = {
static ssize_t show_host_config(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ibmvscsi_host_data *hostdata = shost_priv(shost);
-
- /* returns null-terminated host config data */
- if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
- return strlen(buf);
- else
- return 0;
+ return 0;
}
static struct device_attribute ibmvscsi_host_config = {
.attr = {
- .name = "config",
- .mode = S_IRUGO,
- },
+ .name = "config",
+ .mode = S_IRUGO,
+ },
.show = show_host_config,
};
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 116243087622..c1ab8a4c3161 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -51,13 +51,25 @@ union srp_iu {
u8 reserved[SRP_MAX_IU_LEN];
};
+enum viosrp_crq_headers {
+ VIOSRP_CRQ_FREE = 0x00,
+ VIOSRP_CRQ_CMD_RSP = 0x80,
+ VIOSRP_CRQ_INIT_RSP = 0xC0,
+ VIOSRP_CRQ_XPORT_EVENT = 0xFF
+};
+
+enum viosrp_crq_init_formats {
+ VIOSRP_CRQ_INIT = 0x01,
+ VIOSRP_CRQ_INIT_COMPLETE = 0x02
+};
+
enum viosrp_crq_formats {
VIOSRP_SRP_FORMAT = 0x01,
VIOSRP_MAD_FORMAT = 0x02,
VIOSRP_OS400_FORMAT = 0x03,
VIOSRP_AIX_FORMAT = 0x04,
- VIOSRP_LINUX_FORMAT = 0x06,
- VIOSRP_INLINE_FORMAT = 0x07
+ VIOSRP_LINUX_FORMAT = 0x05,
+ VIOSRP_INLINE_FORMAT = 0x06
};
enum viosrp_crq_status {
@@ -87,7 +99,6 @@ enum viosrp_mad_types {
VIOSRP_EMPTY_IU_TYPE = 0x01,
VIOSRP_ERROR_LOG_TYPE = 0x02,
VIOSRP_ADAPTER_INFO_TYPE = 0x03,
- VIOSRP_HOST_CONFIG_TYPE = 0x04,
VIOSRP_CAPABILITIES_TYPE = 0x05,
VIOSRP_ENABLE_FAST_FAIL = 0x08,
};
@@ -153,11 +164,6 @@ struct viosrp_adapter_info {
__be64 buffer;
};
-struct viosrp_host_config {
- struct mad_common common;
- __be64 buffer;
-};
-
struct viosrp_fast_fail {
struct mad_common common;
};
@@ -195,7 +201,6 @@ union mad_iu {
struct viosrp_empty_iu empty_iu;
struct viosrp_error_log error_log;
struct viosrp_adapter_info adapter_info;
- struct viosrp_host_config host_config;
struct viosrp_fast_fail fast_fail;
struct viosrp_capabilities capabilities;
};
@@ -209,7 +214,10 @@ struct mad_adapter_info_data {
char srp_version[8];
char partition_name[96];
__be32 partition_number;
+#define SRP_MAD_VERSION_1 1
__be32 mad_version;
+#define SRP_MAD_OS_LINUX 2
+#define SRP_MAD_OS_AIX 3
__be32 os_type;
__be32 port_max_txu[8]; /* per-port maximum transfer */
};
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index f8b88fa78e62..9164ce1249c1 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -77,9 +77,10 @@ static void imm_wakeup(void *ref)
spin_lock_irqsave(&arbitration_lock, flags);
if (dev->wanted) {
- parport_claim(dev->dev);
- got_it(dev);
- dev->wanted = 0;
+ if (parport_claim(dev->dev) == 0) {
+ got_it(dev);
+ dev->wanted = 0;
+ }
}
spin_unlock_irqrestore(&arbitration_lock, flags);
}
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 680bf6f0ce76..8f0ea97cf31f 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -166,6 +166,7 @@ static struct attribute_group iscsi_boot_target_attr_group = {
iscsi_boot_rd_attr(eth_index, index, ISCSI_BOOT_ETH_INDEX);
iscsi_boot_rd_attr(eth_flags, flags, ISCSI_BOOT_ETH_FLAGS);
iscsi_boot_rd_attr(eth_ip, ip-addr, ISCSI_BOOT_ETH_IP_ADDR);
+iscsi_boot_rd_attr(eth_prefix, prefix-len, ISCSI_BOOT_ETH_PREFIX_LEN);
iscsi_boot_rd_attr(eth_subnet, subnet-mask, ISCSI_BOOT_ETH_SUBNET_MASK);
iscsi_boot_rd_attr(eth_origin, origin, ISCSI_BOOT_ETH_ORIGIN);
iscsi_boot_rd_attr(eth_gateway, gateway, ISCSI_BOOT_ETH_GATEWAY);
@@ -181,6 +182,7 @@ static struct attribute *ethernet_attrs[] = {
&iscsi_boot_attr_eth_index.attr,
&iscsi_boot_attr_eth_flags.attr,
&iscsi_boot_attr_eth_ip.attr,
+ &iscsi_boot_attr_eth_prefix.attr,
&iscsi_boot_attr_eth_subnet.attr,
&iscsi_boot_attr_eth_origin.attr,
&iscsi_boot_attr_eth_gateway.attr,
@@ -208,6 +210,9 @@ static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
else if (attr == &iscsi_boot_attr_eth_ip.attr)
return boot_kobj->is_visible(boot_kobj->data,
ISCSI_BOOT_ETH_IP_ADDR);
+ else if (attr == &iscsi_boot_attr_eth_prefix.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_PREFIX_LEN);
else if (attr == &iscsi_boot_attr_eth_subnet.attr)
return boot_kobj->is_visible(boot_kobj->data,
ISCSI_BOOT_ETH_SUBNET_MASK);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0b8af186e707..2e4c82f8329c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -26,12 +26,12 @@
* Zhenyu Wang
*/
+#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/blkdev.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
@@ -428,7 +428,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
* sufficient room.
*/
if (conn->hdrdgst_en) {
- iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
+ iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
hdr + hdrlen);
hdrlen += ISCSI_DIGEST_SIZE;
}
@@ -454,7 +454,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct hash_desc *tx_hash = NULL;
+ struct ahash_request *tx_hash = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -467,7 +467,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = &tcp_sw_conn->tx_hash;
+ tx_hash = tcp_sw_conn->tx_hash;
return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
sg, count, offset, len,
@@ -480,7 +480,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct hash_desc *tx_hash = NULL;
+ struct ahash_request *tx_hash = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -492,7 +492,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = &tcp_sw_conn->tx_hash;
+ tx_hash = tcp_sw_conn->tx_hash;
iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
data, len, NULL, tx_hash);
@@ -543,6 +543,7 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct crypto_ahash *tfm;
cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
conn_idx);
@@ -552,23 +553,28 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
- tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- tcp_sw_conn->tx_hash.flags = 0;
- if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
goto free_conn;
- tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- tcp_sw_conn->rx_hash.flags = 0;
- if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
- goto free_tx_tfm;
- tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
+ tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!tcp_sw_conn->tx_hash)
+ goto free_tfm;
+ ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
+
+ tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!tcp_sw_conn->rx_hash)
+ goto free_tx_hash;
+ ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
+
+ tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
return cls_conn;
-free_tx_tfm:
- crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+free_tx_hash:
+ ahash_request_free(tcp_sw_conn->tx_hash);
+free_tfm:
+ crypto_free_ahash(tfm);
free_conn:
iscsi_conn_printk(KERN_ERR, conn,
"Could not create connection due to crc32c "
@@ -607,10 +613,14 @@ static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
iscsi_sw_tcp_release_conn(conn);
- if (tcp_sw_conn->tx_hash.tfm)
- crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
- if (tcp_sw_conn->rx_hash.tfm)
- crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
+ ahash_request_free(tcp_sw_conn->rx_hash);
+ if (tcp_sw_conn->tx_hash) {
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
+ ahash_request_free(tcp_sw_conn->tx_hash);
+ crypto_free_ahash(tfm);
+ }
iscsi_tcp_conn_teardown(cls_conn);
}
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index f42ecb238af5..06d42d00a323 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -45,8 +45,8 @@ struct iscsi_sw_tcp_conn {
void (*old_write_space)(struct sock *);
/* data and header digests */
- struct hash_desc tx_hash; /* CRC32C (Tx) */
- struct hash_desc rx_hash; /* CRC32C (Rx) */
+ struct ahash_request *tx_hash; /* CRC32C (Tx) */
+ struct ahash_request *rx_hash; /* CRC32C (Rx) */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 60cb6dc3c6f0..63a1d69ff515 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -26,13 +26,13 @@
* Zhenyu Wang
*/
+#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/blkdev.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
@@ -214,7 +214,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
} else
sg_init_one(&sg, segment->data + segment->copied,
copied);
- crypto_hash_update(segment->hash, &sg, copied);
+ ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
+ crypto_ahash_update(segment->hash);
}
segment->copied += copied;
@@ -260,7 +261,9 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
* is completely handled in hdr done function.
*/
if (segment->hash) {
- crypto_hash_final(segment->hash, segment->digest);
+ ahash_request_set_crypt(segment->hash, NULL,
+ segment->digest, 0);
+ crypto_ahash_final(segment->hash);
iscsi_tcp_segment_splice_digest(segment,
recv ? segment->recv_digest : segment->digest);
return 0;
@@ -310,13 +313,14 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
}
inline void
-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
- unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
+ size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
{
struct scatterlist sg;
sg_init_one(&sg, hdr, hdrlen);
- crypto_hash_digest(hash, &sg, hdrlen, digest);
+ ahash_request_set_crypt(hash, &sg, digest, hdrlen);
+ crypto_ahash_digest(hash);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
@@ -341,7 +345,7 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
*/
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+ iscsi_segment_done_fn_t *done, struct ahash_request *hash)
{
memset(segment, 0, sizeof(*segment));
segment->total_size = size;
@@ -349,14 +353,14 @@ __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
if (hash) {
segment->hash = hash;
- crypto_hash_init(hash);
+ crypto_ahash_init(hash);
}
}
inline void
iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
size_t size, iscsi_segment_done_fn_t *done,
- struct hash_desc *hash)
+ struct ahash_request *hash)
{
__iscsi_segment_init(segment, size, done, hash);
segment->data = data;
@@ -368,7 +372,8 @@ inline int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
struct scatterlist *sg_list, unsigned int sg_count,
unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+ iscsi_segment_done_fn_t *done,
+ struct ahash_request *hash)
{
struct scatterlist *sg;
unsigned int i;
@@ -431,7 +436,7 @@ static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct hash_desc *rx_hash = NULL;
+ struct ahash_request *rx_hash = NULL;
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
@@ -686,7 +691,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (tcp_conn->in.datalen) {
struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct hash_desc *rx_hash = NULL;
+ struct ahash_request *rx_hash = NULL;
struct scsi_data_buffer *sdb = scsi_in(task->sc);
/*
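
The libiscsi_tcp.c conversion above follows one calling convention throughout: ahash_request_set_crypt() loads the source scatterlist, result buffer, and byte count onto the request, and crypto_ahash_init/update/final() (or crypto_ahash_digest() for the one-shot case) then drive the transform. A minimal sketch of that sequence — example_crc32c is a hypothetical helper, and the int return values are ignored for brevity, as the patch itself does for the synchronous CRC32C case:

	static void example_crc32c(struct ahash_request *req,
				   const void *buf, unsigned int len,
				   u8 digest[ISCSI_DIGEST_SIZE])
	{
		struct scatterlist sg;

		sg_init_one(&sg, buf, len);
		crypto_ahash_init(req);
		/* src = &sg, no result yet, len bytes to consume */
		ahash_request_set_crypt(req, &sg, NULL, len);
		crypto_ahash_update(req);
		/* no more input, write the digest out */
		ahash_request_set_crypt(req, NULL, digest, 0);
		crypto_ahash_final(req);
	}
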
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 25aa9b98d53a..a63542bac153 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1054,11 +1054,11 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
{
struct dentry *dent = file->f_path.dentry;
struct lpfc_hba *phba = file->private_data;
- char dstbuf[32];
+ char dstbuf[33];
uint64_t tmp = 0;
int size;
- memset(dstbuf, 0, 32);
+ memset(dstbuf, 0, 33);
size = (nbytes < 32) ? nbytes : 32;
if (copy_from_user(dstbuf, buf, size))
return 0;
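
The change above grows dstbuf from 32 to 33 bytes: at most 32 bytes are copied from userspace into a zeroed buffer, so the extra byte guarantees NUL termination even for a full-length write before the buffer reaches the string parsers below. An illustrative fragment of the invariant (assuming nbytes and buf as in the surrounding function; not itself part of the patch):

	char dstbuf[33];

	memset(dstbuf, 0, sizeof(dstbuf));
	size = min_t(size_t, nbytes, 32);	/* copy at most 32 bytes */
	if (copy_from_user(dstbuf, buf, size))
		return 0;
	/* dstbuf[32] is still '\0' even when size == 32 */
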
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c37d72effbff..25b5dcd1a5c8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
+#include <linux/lockdep.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1314,6 +1315,8 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
{
struct lpfc_fcf_pri *fcf_pri;
+ lockdep_assert_held(&phba->hbalock);
+
fcf_pri = &phba->fcf.fcf_pri[fcf_index];
fcf_pri->fcf_rec.fcf_index = fcf_index;
/* FCF record priority */
@@ -1398,6 +1401,8 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
struct fcf_record *new_fcf_record, uint32_t addr_mode,
uint16_t vlan_id, uint32_t flag)
{
+ lockdep_assert_held(&phba->hbalock);
+
/* Copy the fields from the HBA's FCF record */
lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
/* Update other fields of driver FCF record */
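
These lockdep_assert_held() additions, and the larger batch in lpfc_sli.c below, document and enforce the locking contract of the __-prefixed helpers: on CONFIG_PROVE_LOCKING kernels an unlocked call path now splats immediately instead of racing silently, and the check compiles away otherwise. A sketch of the pattern with hypothetical function names:

	static void __example_locked_helper(struct lpfc_hba *phba)
	{
		lockdep_assert_held(&phba->hbalock);
		/* ... touch state protected by hbalock ... */
	}

	static void example_caller(struct lpfc_hba *phba)
	{
		spin_lock_irq(&phba->hbalock);
		__example_locked_helper(phba);
		spin_unlock_irq(&phba->hbalock);
	}
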
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 152b3c8a5428..3bd0be6277b3 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4139,23 +4139,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
- if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
- spin_lock_irqsave(&phba->hbalock, flags);
- lpfc_cmd->pCmd = NULL;
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
- /*
- * If there is a thread waiting for command completion
- * wake up the thread.
- */
- spin_lock_irqsave(shost->host_lock, flags);
- if (lpfc_cmd->waitq)
- wake_up(lpfc_cmd->waitq);
- spin_unlock_irqrestore(shost->host_lock, flags);
- lpfc_release_scsi_buf(phba, lpfc_cmd);
- return;
- }
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 92dfd6a5178c..2207726b88ee 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/lockdep.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -576,6 +577,8 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
struct lpfc_iocbq * iocbq = NULL;
+ lockdep_assert_held(&phba->hbalock);
+
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
if (iocbq)
phba->iocb_cnt++;
@@ -797,6 +800,7 @@ int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
+ lockdep_assert_held(&phba->hbalock);
if (!ndlp)
return 0;
if (!ndlp->active_rrqs_xri_bitmap)
@@ -914,6 +918,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct lpfc_nodelist *ndlp;
int found = 0;
+ lockdep_assert_held(&phba->hbalock);
+
if (piocbq->iocb_flag & LPFC_IO_FCP) {
lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
ndlp = lpfc_cmd->rdata->pnode;
@@ -1003,6 +1009,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
unsigned long iflag = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ lockdep_assert_held(&phba->hbalock);
+
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
@@ -1058,6 +1066,7 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ lockdep_assert_held(&phba->hbalock);
/*
* Clean all volatile data fields, preserve iotag and node struct.
@@ -1080,6 +1089,8 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
+ lockdep_assert_held(&phba->hbalock);
+
phba->__lpfc_sli_release_iocbq(phba, iocbq);
phba->iocb_cnt--;
}
@@ -1310,6 +1321,8 @@ static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
+ lockdep_assert_held(&phba->hbalock);
+
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
@@ -1344,6 +1357,8 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_iocbq *cmd_iocb;
+ lockdep_assert_held(&phba->hbalock);
+
list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
return cmd_iocb;
}
@@ -1367,6 +1382,9 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
+
+ lockdep_assert_held(&phba->hbalock);
+
if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
(++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
pring->sli.sli3.next_cmdidx = 0;
@@ -1497,6 +1515,7 @@ static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
+ lockdep_assert_held(&phba->hbalock);
/*
* Set up an iotag
*/
@@ -1606,6 +1625,8 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
IOCB_t *iocb;
struct lpfc_iocbq *nextiocb;
+ lockdep_assert_held(&phba->hbalock);
+
/*
* Check to see if:
* (a) there is anything on the txq to send
@@ -1647,6 +1668,8 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
struct hbq_s *hbqp = &phba->hbqs[hbqno];
+ lockdep_assert_held(&phba->hbalock);
+
if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
++hbqp->next_hbqPutIdx >= hbqp->entry_count)
hbqp->next_hbqPutIdx = 0;
@@ -1747,6 +1770,7 @@ static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
+ lockdep_assert_held(&phba->hbalock);
return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
@@ -1768,6 +1792,7 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
struct lpfc_hbq_entry *hbqe;
dma_addr_t physaddr = hbq_buf->dbuf.phys;
+ lockdep_assert_held(&phba->hbalock);
/* Get next HBQ entry slot to use */
hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
if (hbqe) {
@@ -1808,6 +1833,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
struct lpfc_rqe hrqe;
struct lpfc_rqe drqe;
+ lockdep_assert_held(&phba->hbalock);
hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
@@ -1986,6 +2012,8 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
struct hbq_dmabuf *hbq_buf;
uint32_t hbqno;
+ lockdep_assert_held(&phba->hbalock);
+
hbqno = tag >> 16;
if (hbqno >= LPFC_MAX_HBQS)
return NULL;
@@ -2647,6 +2675,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
{
struct lpfc_iocbq *cmd_iocb = NULL;
uint16_t iotag;
+ lockdep_assert_held(&phba->hbalock);
iotag = prspiocb->iocb.ulpIoTag;
@@ -2685,6 +2714,7 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
{
struct lpfc_iocbq *cmd_iocb;
+ lockdep_assert_held(&phba->hbalock);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
@@ -3799,6 +3829,8 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
int i;
uint8_t hdrtype;
+ lockdep_assert_held(&phba->hbalock);
+
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
if (hdrtype != 0x80 ||
(FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
@@ -7861,6 +7893,7 @@ void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
+ lockdep_assert_held(&phba->hbalock);
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
}
@@ -7888,6 +7921,8 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
struct lpfc_iocbq * nextiocb;
+ lockdep_assert_held(&phba->hbalock);
+
nextiocb = lpfc_sli_ringtx_get(phba, pring);
if (!nextiocb) {
nextiocb = *piocb;
@@ -7927,6 +7962,8 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
IOCB_t *iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+ lockdep_assert_held(&phba->hbalock);
+
if (piocb->iocb_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
@@ -8642,6 +8679,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+ lockdep_assert_held(&phba->hbalock);
+
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
@@ -9752,6 +9791,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
int retval;
unsigned long iflags;
+ lockdep_assert_held(&phba->hbalock);
+
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
@@ -9854,6 +9895,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
int retval = IOCB_ERROR;
IOCB_t *icmd = NULL;
+ lockdep_assert_held(&phba->hbalock);
+
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 141226631429..a6682c508c4c 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -18,11 +18,11 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#include <asm/macio.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index c0f7c8ce54aa..4484e63033a5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.808.16.00-rc1"
-#define MEGASAS_RELDATE "Oct. 8, 2015"
+#define MEGASAS_VERSION "06.810.09.00-rc1"
+#define MEGASAS_RELDATE "Jan. 28, 2016"
/*
* Device IDs
@@ -152,6 +152,7 @@
#define MFI_RESET_FLAGS MFI_INIT_READY| \
MFI_INIT_MFIMODE| \
MFI_INIT_ABORT
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
/*
* MFI frame flags
@@ -170,6 +171,7 @@
/* Driver internal */
#define DRV_DCMD_POLLED_MODE 0x1
+#define DRV_DCMD_SKIP_REFIRE 0x2
/*
* Definition for cmd_status
@@ -214,6 +216,7 @@
#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100
#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600)
+#define MR_DCMD_PD_GET_INFO 0x02020000
/*
* Global functions
@@ -390,6 +393,7 @@ enum MR_EVT_ARGS {
#define SGE_BUFFER_SIZE 4096
+#define MEGASAS_CLUSTER_ID_SIZE 16
/*
* define constants for device list query options
*/
@@ -434,6 +438,257 @@ enum MR_PD_STATE {
MR_PD_STATE_SYSTEM = 0x40,
};
+union MR_PD_REF {
+ struct {
+ u16 deviceId;
+ u16 seqNum;
+ } mrPdRef;
+ u32 ref;
+};
+
+/*
+ * define the DDF Type bit structure
+ */
+union MR_PD_DDF_TYPE {
+ struct {
+ union {
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u16 forcedPDGUID:1;
+ u16 inVD:1;
+ u16 isGlobalSpare:1;
+ u16 isSpare:1;
+ u16 isForeign:1;
+ u16 reserved:7;
+ u16 intf:4;
+#else
+ u16 intf:4;
+ u16 reserved:7;
+ u16 isForeign:1;
+ u16 isSpare:1;
+ u16 isGlobalSpare:1;
+ u16 inVD:1;
+ u16 forcedPDGUID:1;
+#endif
+ } pdType;
+ u16 type;
+ };
+ u16 reserved;
+ } ddf;
+ struct {
+ u32 reserved;
+ } nonDisk;
+ u32 type;
+} __packed;
+
+/*
+ * defines the progress structure
+ */
+union MR_PROGRESS {
+ struct {
+ u16 progress;
+ union {
+ u16 elapsedSecs;
+ u16 elapsedSecsForLastPercent;
+ };
+ } mrProgress;
+ u32 w;
+} __packed;
+
+/*
+ * defines the physical drive progress structure
+ */
+struct MR_PD_PROGRESS {
+ struct {
+#ifndef MFI_BIG_ENDIAN
+ u32 rbld:1;
+ u32 patrol:1;
+ u32 clear:1;
+ u32 copyBack:1;
+ u32 erase:1;
+ u32 locate:1;
+ u32 reserved:26;
+#else
+ u32 reserved:26;
+ u32 locate:1;
+ u32 erase:1;
+ u32 copyBack:1;
+ u32 clear:1;
+ u32 patrol:1;
+ u32 rbld:1;
+#endif
+ } active;
+ union MR_PROGRESS rbld;
+ union MR_PROGRESS patrol;
+ union {
+ union MR_PROGRESS clear;
+ union MR_PROGRESS erase;
+ };
+
+ struct {
+#ifndef MFI_BIG_ENDIAN
+ u32 rbld:1;
+ u32 patrol:1;
+ u32 clear:1;
+ u32 copyBack:1;
+ u32 erase:1;
+ u32 reserved:27;
+#else
+ u32 reserved:27;
+ u32 erase:1;
+ u32 copyBack:1;
+ u32 clear:1;
+ u32 patrol:1;
+ u32 rbld:1;
+#endif
+ } pause;
+
+ union MR_PROGRESS reserved[3];
+} __packed;
+
+struct MR_PD_INFO {
+ union MR_PD_REF ref;
+ u8 inquiryData[96];
+ u8 vpdPage83[64];
+ u8 notSupported;
+ u8 scsiDevType;
+
+ union {
+ u8 connectedPortBitmap;
+ u8 connectedPortNumbers;
+ };
+
+ u8 deviceSpeed;
+ u32 mediaErrCount;
+ u32 otherErrCount;
+ u32 predFailCount;
+ u32 lastPredFailEventSeqNum;
+
+ u16 fwState;
+ u8 disabledForRemoval;
+ u8 linkSpeed;
+ union MR_PD_DDF_TYPE state;
+
+ struct {
+ u8 count;
+#ifndef __BIG_ENDIAN_BITFIELD
+ u8 isPathBroken:4;
+ u8 reserved3:3;
+ u8 widePortCapable:1;
+#else
+ u8 widePortCapable:1;
+ u8 reserved3:3;
+ u8 isPathBroken:4;
+#endif
+
+ u8 connectorIndex[2];
+ u8 reserved[4];
+ u64 sasAddr[2];
+ u8 reserved2[16];
+ } pathInfo;
+
+ u64 rawSize;
+ u64 nonCoercedSize;
+ u64 coercedSize;
+ u16 enclDeviceId;
+ u8 enclIndex;
+
+ union {
+ u8 slotNumber;
+ u8 enclConnectorIndex;
+ };
+
+ struct MR_PD_PROGRESS progInfo;
+ u8 badBlockTableFull;
+ u8 unusableInCurrentConfig;
+ u8 vpdPage83Ext[64];
+ u8 powerState;
+ u8 enclPosition;
+ u32 allowedOps;
+ u16 copyBackPartnerId;
+ u16 enclPartnerDeviceId;
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u16 fdeCapable:1;
+ u16 fdeEnabled:1;
+ u16 secured:1;
+ u16 locked:1;
+ u16 foreign:1;
+ u16 needsEKM:1;
+ u16 reserved:10;
+#else
+ u16 reserved:10;
+ u16 needsEKM:1;
+ u16 foreign:1;
+ u16 locked:1;
+ u16 secured:1;
+ u16 fdeEnabled:1;
+ u16 fdeCapable:1;
+#endif
+ } security;
+ u8 mediaType;
+ u8 notCertified;
+ u8 bridgeVendor[8];
+ u8 bridgeProductIdentification[16];
+ u8 bridgeProductRevisionLevel[4];
+ u8 satBridgeExists;
+
+ u8 interfaceType;
+ u8 temperature;
+ u8 emulatedBlockSize;
+ u16 userDataBlockSize;
+ u16 reserved2;
+
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u32 piType:3;
+ u32 piFormatted:1;
+ u32 piEligible:1;
+ u32 NCQ:1;
+ u32 WCE:1;
+ u32 commissionedSpare:1;
+ u32 emergencySpare:1;
+ u32 ineligibleForSSCD:1;
+ u32 ineligibleForLd:1;
+ u32 useSSEraseType:1;
+ u32 wceUnchanged:1;
+ u32 supportScsiUnmap:1;
+ u32 reserved:18;
+#else
+ u32 reserved:18;
+ u32 supportScsiUnmap:1;
+ u32 wceUnchanged:1;
+ u32 useSSEraseType:1;
+ u32 ineligibleForLd:1;
+ u32 ineligibleForSSCD:1;
+ u32 emergencySpare:1;
+ u32 commissionedSpare:1;
+ u32 WCE:1;
+ u32 NCQ:1;
+ u32 piEligible:1;
+ u32 piFormatted:1;
+ u32 piType:3;
+#endif
+ } properties;
+
+ u64 shieldDiagCompletionTime;
+ u8 shieldCounter;
+
+ u8 linkSpeedOther;
+ u8 reserved4[2];
+
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u32 bbmErrCountSupported:1;
+ u32 bbmErrCount:31;
+#else
+ u32 bbmErrCount:31;
+ u32 bbmErrCountSupported:1;
+#endif
+ } bbmErr;
+
+ u8 reserved1[512-428];
+} __packed;
/*
* defines the physical drive address structure
@@ -473,6 +728,7 @@ struct megasas_pd_list {
u16 tid;
u8 driveType;
u8 driveState;
+ u8 interface;
} __packed;
/*
@@ -972,7 +1228,8 @@ struct megasas_ctrl_info {
*/
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:26;
+ u32 reserved:25;
+ u32 passive:1;
u32 premiumFeatureMismatch:1;
u32 ctrlPropIncompatible:1;
u32 fwVersionMismatch:1;
@@ -986,11 +1243,12 @@ struct megasas_ctrl_info {
u32 fwVersionMismatch:1;
u32 ctrlPropIncompatible:1;
u32 premiumFeatureMismatch:1;
- u32 reserved:26;
+ u32 passive:1;
+ u32 reserved:25;
#endif
} cluster;
- char clusterId[16]; /*7D4h */
+ char clusterId[MEGASAS_CLUSTER_ID_SIZE]; /*0x7D4 */
struct {
u8 maxVFsSupported; /*0x7E4*/
u8 numVFsEnabled; /*0x7E5*/
@@ -1083,6 +1341,8 @@ struct megasas_ctrl_info {
#define VD_EXT_DEBUG 0
+#define SCAN_PD_CHANNEL 0x1
+#define SCAN_VD_CHANNEL 0x2
enum MR_SCSI_CMD_TYPE {
READ_WRITE_LDIO = 0,
@@ -1091,6 +1351,17 @@ enum MR_SCSI_CMD_TYPE {
NON_READ_WRITE_SYSPDIO = 3,
};
+enum DCMD_TIMEOUT_ACTION {
+ INITIATE_OCR = 0,
+ KILL_ADAPTER = 1,
+ IGNORE_TIMEOUT = 2,
+};
+
+enum FW_BOOT_CONTEXT {
+ PROBE_CONTEXT = 0,
+ OCR_CONTEXT = 1,
+};
+
/* Frame Type */
#define IO_FRAME 0
#define PTHRU_FRAME 1
@@ -1137,6 +1408,7 @@ enum MR_SCSI_CMD_TYPE {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
+#define MFI_IO_TIMEOUT_SECS 180
#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
@@ -1154,6 +1426,7 @@ enum MR_SCSI_CMD_TYPE {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
#define MR_MAX_MSIX_REG_ARRAY 16
+#define MR_RDPQ_MODE_OFFSET 0X00800000
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -1193,8 +1466,9 @@ struct megasas_register_set {
u32 outbound_scratch_pad ; /*00B0h*/
u32 outbound_scratch_pad_2; /*00B4h*/
+ u32 outbound_scratch_pad_3; /*00B8h*/
- u32 reserved_4[2]; /*00B8h*/
+ u32 reserved_4; /*00BCh*/
u32 inbound_low_queue_port ; /*00C0h*/
@@ -1266,7 +1540,10 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:23;
+ u32 reserved:20;
+ u32 support_qd_throttling:1;
+ u32 support_fp_rlbypass:1;
+ u32 support_vfid_in_ioframe:1;
u32 support_ext_io_size:1;
u32 support_ext_queue_depth:1;
u32 security_protocol_cmds_fw:1;
@@ -1286,7 +1563,10 @@ typedef union _MFI_CAPABILITIES {
u32 security_protocol_cmds_fw:1;
u32 support_ext_queue_depth:1;
u32 support_ext_io_size:1;
- u32 reserved:23;
+ u32 support_vfid_in_ioframe:1;
+ u32 support_fp_rlbypass:1;
+ u32 support_qd_throttling:1;
+ u32 reserved:20;
#endif
} mfi_capabilities;
__le32 reg;
@@ -1511,6 +1791,15 @@ union megasas_frame {
u8 raw_bytes[64];
};
+/**
+ * struct MR_PRIV_DEVICE - sdev private hostdata
+ * @is_tm_capable: firmware managed tm_capable flag
+ * @tm_busy: TM request is in progress
+ */
+struct MR_PRIV_DEVICE {
+ bool is_tm_capable;
+ bool tm_busy;
+};
struct megasas_cmd;
union megasas_evt_class_locale {
@@ -1700,6 +1989,19 @@ struct MR_DRV_SYSTEM_INFO {
u8 reserved[1980];
};
+enum MR_PD_TYPE {
+ UNKNOWN_DRIVE = 0,
+ PARALLEL_SCSI = 1,
+ SAS_PD = 2,
+ SATA_PD = 3,
+ FC_PD = 4,
+};
+
+/* JBOD Queue depth definitions */
+#define MEGASAS_SATA_QD 32
+#define MEGASAS_SAS_QD 64
+#define MEGASAS_DEFAULT_PD_QD 64
+
struct megasas_instance {
__le32 *producer;
@@ -1714,6 +2016,8 @@ struct megasas_instance {
dma_addr_t vf_affiliation_111_h;
struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
dma_addr_t hb_host_mem_h;
+ struct MR_PD_INFO *pd_info;
+ dma_addr_t pd_info_h;
__le32 *reply_queue;
dma_addr_t reply_queue_h;
@@ -1745,6 +2049,8 @@ struct megasas_instance {
u16 max_fw_cmds;
u16 max_mfi_cmds;
u16 max_scsi_cmds;
+ u16 ldio_threshold;
+ u16 cur_can_queue;
u32 max_sectors_per_req;
struct megasas_aen_event *ev;
@@ -1762,7 +2068,7 @@ struct megasas_instance {
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct mutex aen_mutex;
+ struct mutex hba_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -1775,6 +2081,7 @@ struct megasas_instance {
u32 fw_support_ieee;
atomic_t fw_outstanding;
+ atomic_t ldio_outstanding;
atomic_t fw_reset_no_pci_access;
struct megasas_instance_template *instancet;
@@ -1797,7 +2104,7 @@ struct megasas_instance {
u16 drv_supported_vd_count;
u16 drv_supported_pd_count;
- u8 adprecovery;
+ atomic_t adprecovery;
unsigned long last_time;
u32 mfiStatus;
u32 last_seq_num;
@@ -1822,11 +2129,14 @@ struct megasas_instance {
char skip_heartbeat_timer_del;
u8 requestorId;
char PlasmaFW111;
- char mpio;
+ char clusterId[MEGASAS_CLUSTER_ID_SIZE];
+ u8 peerIsPresent;
+ u8 passive;
u16 throttlequeuedepth;
u8 mask_interrupts;
u16 max_chain_frame_sz;
u8 is_imr;
+ u8 is_rdpq;
bool dev_handle;
};
struct MR_LD_VF_MAP {
@@ -1916,7 +2226,7 @@ struct megasas_instance_template {
u32 (*init_adapter)(struct megasas_instance *);
u32 (*build_and_issue_cmd) (struct megasas_instance *,
struct scsi_cmnd *);
- void (*issue_dcmd) (struct megasas_instance *instance,
+ int (*issue_dcmd)(struct megasas_instance *instance,
struct megasas_cmd *cmd);
};
@@ -2014,6 +2324,19 @@ struct megasas_mgmt_info {
int max_index;
};
+enum MEGASAS_OCR_CAUSE {
+ FW_FAULT_OCR = 0,
+ SCSIIO_TIMEOUT_OCR = 1,
+ MFI_IO_TIMEOUT_OCR = 2,
+};
+
+enum DCMD_RETURN_STATUS {
+ DCMD_SUCCESS = 0,
+ DCMD_TIMEOUT = 1,
+ DCMD_FAILED = 2,
+ DCMD_NOT_FIRED = 3,
+};
+
u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
@@ -2051,4 +2374,8 @@ void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
int megasas_cmd_type(struct scsi_cmnd *cmd);
void megasas_setup_jbod_map(struct megasas_instance *instance);
+void megasas_update_sdev_properties(struct scsi_device *sdev);
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
#endif /*LSI_MEGARAID_SAS_H */
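
Among the header additions above, union MR_PD_REF lets the driver and firmware exchange a {deviceId, seqNum} pair as a single 32-bit word. A hedged usage fragment — raw_ref is a hypothetical __le32 value as received from firmware:

	union MR_PD_REF pd_ref;

	pd_ref.ref = le32_to_cpu(raw_ref);	/* whole word at once */
	pr_debug("PD %u seq %u\n",
		 pd_ref.mrPdRef.deviceId, pd_ref.mrPdRef.seqNum);
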
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 97a1c1c33b05..5c08568ccfbf 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -83,7 +83,7 @@ module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
"Adapter queue depth when throttled due to I/O timeout. Default: 16");
-int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
"before resetting adapter. Default: 180");
@@ -92,6 +92,18 @@ int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
+int rdpq_enable = 1;
+module_param(rdpq_enable, int, S_IRUGO);
+MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
+
+unsigned int dual_qdepth_disable;
+module_param(dual_qdepth_disable, int, S_IRUGO);
+MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
+
+unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+module_param(scmd_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
@@ -104,6 +116,8 @@ static int megasas_ld_list_query(struct megasas_instance *instance,
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
+static int
+megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
/*
* PCI ID table for all supported controllers
*/
@@ -189,18 +203,18 @@ int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
int seconds);
void megasas_reset_reply_desc(struct megasas_instance *instance);
-int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
int megasas_check_mpio_paths(struct megasas_instance *instance,
struct scsi_cmnd *scmd);
-void
+int
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
instance->instancet->fire_cmd(instance,
cmd->frame_phys_addr, 0, instance->reg_set);
+ return 0;
}
/**
@@ -473,7 +487,7 @@ static int
megasas_check_reset_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
(le32_to_cpu(*instance->consumer) ==
MEGASAS_ADPRESET_INPROG_SIGN))
return 1;
@@ -609,7 +623,7 @@ static int
megasas_check_reset_ppc(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
return 1;
return 0;
@@ -735,6 +749,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -746,7 +761,7 @@ static int
megasas_check_reset_skinny(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
return 1;
return 0;
@@ -940,9 +955,8 @@ static int
megasas_check_reset_gen2(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
return 1;
- }
return 0;
}
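
The adprecovery conversions in this file all follow one pattern: the recovery state becomes an atomic_t, so hot paths read it lock-free with atomic_read() and writers use atomic_set(), instead of bracketing a plain u8 with hba_lock. Sketch of the two sides (fragments, assuming an instance pointer as in the driver):

	/* writer, e.g. when a fault is detected */
	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);

	/* reader, e.g. in the I/O submission path */
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return SCSI_MLQUEUE_HOST_BUSY;
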
@@ -983,25 +997,20 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
- int seconds;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
- /*
- * Issue the frame using inbound queue port
- */
- instance->instancet->issue_dcmd(instance, cmd);
+ if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+ (instance->instancet->issue_dcmd(instance, cmd))) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
+ }
- /*
- * Wait for cmd_status to change
- */
- if (instance->requestorId)
- seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
- else
- seconds = MFI_POLL_TIMEOUT_SECS;
- return wait_and_poll(instance, cmd, seconds);
+ return wait_and_poll(instance, cmd, instance->requestorId ?
+ MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}
/**
@@ -1019,21 +1028,29 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
-
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
- instance->instancet->issue_dcmd(instance, cmd);
+ if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+ (instance->instancet->issue_dcmd(instance, cmd))) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
+ }
+
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
- if (!ret)
- return 1;
+ if (!ret) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
+ __func__, __LINE__);
+ return DCMD_TIMEOUT;
+ }
} else
wait_event(instance->int_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
return (cmd->cmd_status_drv == MFI_STAT_OK) ?
- 0 : 1;
+ DCMD_SUCCESS : DCMD_FAILED;
}
/**
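
megasas_issue_blocked_cmd() now reports one of the DCMD_* codes from enum DCMD_RETURN_STATUS instead of a bare 0/1, and the DCMD issuers later in this patch (megasas_get_pd_list, megasas_get_ld_list, and friends) all adopt the same caller shape. Sketched here with instance/cmd assumed from the enclosing function:

	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	switch (ret) {
	case DCMD_SUCCESS:
		/* consume the DMA'd results */
		break;
	case DCMD_TIMEOUT:
		/* pick OCR / kill / ignore via dcmd_timeout_ocr_possible() */
		break;
	case DCMD_FAILED:
	case DCMD_NOT_FIRED:
		/* firmware rejected it or adapter is dead; buffer is invalid */
		break;
	}
	/* a timed-out cmd may be refired by OCR, so don't return it */
	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);
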
@@ -1077,15 +1094,20 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cmd->sync_cmd = 1;
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
- instance->instancet->issue_dcmd(instance, cmd);
+ if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+ (instance->instancet->issue_dcmd(instance, cmd))) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
+ }
if (timeout) {
ret = wait_event_timeout(instance->abort_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret) {
- dev_err(&instance->pdev->dev, "Command timedout"
- "from %s\n", __func__);
- return 1;
+ dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
+ __func__, __LINE__);
+ return DCMD_TIMEOUT;
}
} else
wait_event(instance->abort_cmd_wait_q,
@@ -1094,7 +1116,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cmd->sync_cmd = 0;
megasas_return_cmd(instance, cmd);
- return 0;
+ return (cmd->cmd_status_drv == MFI_STAT_OK) ?
+ DCMD_SUCCESS : DCMD_FAILED;
}
/**
@@ -1621,7 +1644,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
return 0;
out_return_cmd:
megasas_return_cmd(instance, cmd);
- return 1;
+ return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1634,7 +1657,7 @@ static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
- unsigned long flags;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
instance = (struct megasas_instance *)
scmd->device->host->hostdata;
@@ -1648,35 +1671,38 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (instance->issuepend_done == 0)
return SCSI_MLQUEUE_HOST_BUSY;
- spin_lock_irqsave(&instance->hba_lock, flags);
/* Check for an mpio path and adjust behavior */
- if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
if (megasas_check_mpio_paths(instance, scmd) ==
(DID_RESET << 16)) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
return SCSI_MLQUEUE_HOST_BUSY;
} else {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
}
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- return SCSI_MLQUEUE_HOST_BUSY;
+ mr_device_priv_data = scmd->device->hostdata;
+ if (!mr_device_priv_data) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
}
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (mr_device_priv_data->tm_busy)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
scmd->result = 0;
@@ -1699,12 +1725,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
break;
}
- if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
- dev_err(&instance->pdev->dev, "Err returned from build_and_issue_cmd\n");
- return SCSI_MLQUEUE_HOST_BUSY;
- }
-
- return 0;
+ return instance->instancet->build_and_issue_cmd(instance, scmd);
out_done:
scmd->scsi_done(scmd);
@@ -1726,27 +1747,39 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
}
/*
-* megasas_set_dma_alignment - Set DMA alignment for PI enabled VD
+* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
*
* @sdev: OS provided scsi device
*
* Returns void
*/
-static void megasas_set_dma_alignment(struct scsi_device *sdev)
+void megasas_update_sdev_properties(struct scsi_device *sdev)
{
+ u16 pd_index = 0;
u32 device_id, ld;
struct megasas_instance *instance;
struct fusion_context *fusion;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
struct MR_LD_RAID *raid;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
instance = megasas_lookup_instance(sdev->host->host_no);
fusion = instance->ctrl_context;
+ mr_device_priv_data = sdev->hostdata;
if (!fusion)
return;
- if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) {
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
+ instance->use_seqnum_jbod_fp) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ pd_sync = (void *)fusion->pd_seq_sync
+ [(instance->pd_seq_map_id - 1) & 1];
+ mr_device_priv_data->is_tm_capable =
+ pd_sync->seq[pd_index].capability.tmCapable;
+ } else {
device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+ sdev->id;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -1754,10 +1787,51 @@ static void megasas_set_dma_alignment(struct scsi_device *sdev)
raid = MR_LdRaidGet(ld, local_map_ptr);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ mr_device_priv_data->is_tm_capable =
+ raid->capability.tmCapable;
+ }
+}
+
+static void megasas_set_device_queue_depth(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+ int ret = DCMD_FAILED;
+ struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+
+ if (instance->pd_info) {
+ mutex_lock(&instance->hba_mutex);
+ ret = megasas_get_pd_info(instance, pd_index);
+ mutex_unlock(&instance->hba_mutex);
+ }
+
+ if (ret != DCMD_SUCCESS)
+ return;
+
+ if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+
+ switch (instance->pd_list[pd_index].interface) {
+ case SAS_PD:
+ scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
+ break;
+
+ case SATA_PD:
+ scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
+ break;
+
+ default:
+ scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
+ }
+ }
}
}
+
static int megasas_slave_configure(struct scsi_device *sdev)
{
u16 pd_index = 0;
@@ -1774,12 +1848,14 @@ static int megasas_slave_configure(struct scsi_device *sdev)
return -ENXIO;
}
}
- megasas_set_dma_alignment(sdev);
+ megasas_set_device_queue_depth(sdev);
+ megasas_update_sdev_properties(sdev);
+
/*
* The RAID firmware may require extended timeouts.
*/
blk_queue_rq_timeout(sdev->request_queue,
- MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+ scmd_timeout * HZ);
return 0;
}
@@ -1788,6 +1864,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
{
u16 pd_index = 0;
struct megasas_instance *instance ;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
instance = megasas_lookup_instance(sdev->host->host_no);
if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
@@ -1799,13 +1876,26 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
sdev->id;
if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
MR_PD_STATE_SYSTEM)) {
- return 0;
+ goto scan_target;
}
return -ENXIO;
}
+
+scan_target:
+ mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
+ GFP_KERNEL);
+ if (!mr_device_priv_data)
+ return -ENOMEM;
+ sdev->hostdata = mr_device_priv_data;
return 0;
}
+static void megasas_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
/*
* megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
* kill adapter
@@ -1845,7 +1935,7 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
/* Set critical error to block I/O & ioctls in case caller didn't */
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
/* Wait 1 second to ensure IO or ioctls in build have posted */
msleep(1000);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -1854,7 +1944,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
- if (instance->mpio && instance->requestorId)
+ if (instance->requestorId && instance->peerIsPresent)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
} else {
writel(MFI_STOP_ADP,
@@ -1883,7 +1973,7 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
spin_lock_irqsave(instance->host->host_lock, flags);
instance->flag &= ~MEGASAS_FW_BUSY;
- instance->host->can_queue = instance->max_scsi_cmds;
+ instance->host->can_queue = instance->cur_can_queue;
spin_unlock_irqrestore(instance->host->host_lock, flags);
}
}
@@ -1905,7 +1995,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
unsigned long flags;
/* If we have already declared adapter dead, donot complete cmds */
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1974,7 +2064,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
}
instance->instancet->disable_intr(instance);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
instance->issuepend_done = 0;
atomic_set(&instance->fw_outstanding, 0);
@@ -2054,9 +2144,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
- megasas_issue_blocked_cmd(instance, cmd, 0);
-
- if (dcmd->cmd_status) {
+ if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
" failed with status 0x%x for scsi%d\n",
dcmd->cmd_status, instance->host->host_no);
@@ -2166,9 +2254,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
- megasas_issue_blocked_cmd(instance, cmd, 0);
- if (dcmd->cmd_status) {
+ if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
" failed with status 0x%x for scsi%d\n",
dcmd->cmd_status, instance->host->host_no);
@@ -2373,21 +2460,21 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
*/
static int megasas_wait_for_outstanding(struct megasas_instance *instance)
{
- int i;
+ int i, sl, outstanding;
u32 reset_index;
u32 wait_time = MEGASAS_RESET_WAIT_TIME;
- u8 adprecovery;
unsigned long flags;
struct list_head clist_local;
struct megasas_cmd *reset_cmd;
u32 fw_state;
- u8 kill_adapter_flag;
- spin_lock_irqsave(&instance->hba_lock, flags);
- adprecovery = instance->adprecovery;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
+ __func__, __LINE__);
+ return FAILED;
+ }
- if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
INIT_LIST_HEAD(&clist_local);
spin_lock_irqsave(&instance->hba_lock, flags);
@@ -2398,18 +2485,13 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
for (i = 0; i < wait_time; i++) {
msleep(1000);
- spin_lock_irqsave(&instance->hba_lock, flags);
- adprecovery = instance->adprecovery;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- if (adprecovery == MEGASAS_HBA_OPERATIONAL)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
break;
}
- if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
- spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
return FAILED;
}
@@ -2447,7 +2529,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
}
for (i = 0; i < resetwaittime; i++) {
- int outstanding = atomic_read(&instance->fw_outstanding);
+ outstanding = atomic_read(&instance->fw_outstanding);
if (!outstanding)
break;
@@ -2466,67 +2548,60 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
}
i = 0;
- kill_adapter_flag = 0;
+ outstanding = atomic_read(&instance->fw_outstanding);
+ fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+
+ if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+ goto no_outstanding;
+
+ if (instance->disableOnlineCtrlReset)
+ goto kill_hba_and_failed;
do {
- fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
- if ((fw_state == MFI_STATE_FAULT) &&
- (instance->disableOnlineCtrlReset == 0)) {
- if (i == 3) {
- kill_adapter_flag = 2;
- break;
- }
+ if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
+ dev_info(&instance->pdev->dev,
+ "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
+ __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
+ if (i == 3)
+ goto kill_hba_and_failed;
megasas_do_ocr(instance);
- kill_adapter_flag = 1;
- /* wait for 1 secs to let FW finish the pending cmds */
- msleep(1000);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
+ __func__, __LINE__);
+ return FAILED;
+ }
+ dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issuing OCR.\n",
+ __func__, __LINE__);
+
+ for (sl = 0; sl < 10; sl++)
+ msleep(500);
+
+ outstanding = atomic_read(&instance->fw_outstanding);
+
+ fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+ goto no_outstanding;
}
i++;
} while (i <= 3);
- if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
- if (instance->disableOnlineCtrlReset == 0) {
- megasas_do_ocr(instance);
+no_outstanding:
- /* wait for 5 secs to let FW finish the pending cmds */
- for (i = 0; i < wait_time; i++) {
- int outstanding =
- atomic_read(&instance->fw_outstanding);
- if (!outstanding)
- return SUCCESS;
- msleep(1000);
- }
- }
- }
+ dev_info(&instance->pdev->dev, "%s:%d no pending commands remain after reset handling.\n",
+ __func__, __LINE__);
+ return SUCCESS;
- if (atomic_read(&instance->fw_outstanding) ||
- (kill_adapter_flag == 2)) {
- dev_notice(&instance->pdev->dev, "pending cmds after reset\n");
- /*
- * Send signal to FW to stop processing any pending cmds.
- * The controller will be taken offline by the OS now.
- */
- if ((instance->pdev->device ==
- PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device ==
- PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
- writel(MFI_STOP_ADP,
- &instance->reg_set->doorbell);
- } else {
- writel(MFI_STOP_ADP,
- &instance->reg_set->inbound_doorbell);
- }
- megasas_dump_pending_frames(instance);
- spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- return FAILED;
- }
+kill_hba_and_failed:
- dev_notice(&instance->pdev->dev, "no pending cmds after reset\n");
+ /* Reset not supported, kill adapter */
+ dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
+ " disableOnlineCtrlReset %d fw_outstanding %d \n",
+ __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
+ atomic_read(&instance->fw_outstanding));
+ megasas_dump_pending_frames(instance);
+ megaraid_sas_kill_hba(instance);
- return SUCCESS;
+ return FAILED;
}
/**
@@ -2547,7 +2622,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
scmd->cmnd[0], scmd->retries);
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
return FAILED;
}
@@ -2575,7 +2650,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
unsigned long flags;
if (time_after(jiffies, scmd->jiffies_at_alloc +
- (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+ (scmd_timeout * 2) * HZ)) {
return BLK_EH_NOT_HANDLED;
}
@@ -2851,6 +2926,16 @@ megasas_page_size_show(struct device *cdev,
return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
}
+static ssize_t
+megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
+}
+
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -2859,12 +2944,15 @@ static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
megasas_fw_crash_state_show, megasas_fw_crash_state_store);
static DEVICE_ATTR(page_size, S_IRUGO,
megasas_page_size_show, NULL);
+static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
+ megasas_ldio_outstanding_show, NULL);
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
&dev_attr_fw_crash_buffer,
&dev_attr_fw_crash_state,
&dev_attr_page_size,
+ &dev_attr_ldio_outstanding,
NULL,
};
@@ -2878,6 +2966,7 @@ static struct scsi_host_template megasas_template = {
.proc_name = "megaraid_sas",
.slave_configure = megasas_slave_configure,
.slave_alloc = megasas_slave_alloc,
+ .slave_destroy = megasas_slave_destroy,
.queuecommand = megasas_queue_command,
.eh_device_reset_handler = megasas_reset_device,
.eh_bus_reset_handler = megasas_reset_bus_host,
@@ -3277,13 +3366,13 @@ process_fw_state_change_wq(struct work_struct *work)
u32 wait;
unsigned long flags;
- if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
- instance->adprecovery);
+ atomic_read(&instance->adprecovery));
return ;
}
- if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "FW detected to be in fault"
"state, restarting it...\n");
@@ -3326,7 +3415,7 @@ process_fw_state_change_wq(struct work_struct *work)
megasas_issue_init_mfi(instance);
spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
spin_unlock_irqrestore(&instance->hba_lock, flags);
instance->instancet->enable_intr(instance);
@@ -3391,14 +3480,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
instance->instancet->disable_intr(instance);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
instance->issuepend_done = 0;
atomic_set(&instance->fw_outstanding, 0);
megasas_internal_reset_defer_cmds(instance);
dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
- fw_state, instance->adprecovery);
+ fw_state, atomic_read(&instance->adprecovery));
schedule_work(&instance->work_init);
return IRQ_HANDLED;
@@ -3852,6 +3941,92 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
}
/*
+ * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
+ * @instance: Adapter soft state
+ *
+ * Return KILL_ADAPTER for non-Fusion (MFI) adapters, IGNORE_TIMEOUT while
+ * driver load/unload or OCR is in progress, and INITIATE_OCR otherwise.
+ */
+inline int
+dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
+
+ if (!instance->ctrl_context)
+ return KILL_ADAPTER;
+ else if (instance->unload ||
+ test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+ return IGNORE_TIMEOUT;
+ else
+ return INITIATE_OCR;
+}
+
+static int
+megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
+{
+ int ret;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(instance->pd_info, 0, sizeof(*instance->pd_info));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.s[0] = cpu_to_le16(device_id);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ switch (ret) {
+ case DCMD_SUCCESS:
+ instance->pd_list[device_id].interface =
+ instance->pd_info->state.ddf.pdType.intf;
+ break;
+
+ case DCMD_TIMEOUT:
+
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+
+ break;
+ }
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+/*
* megasas_get_pd_list_info - Returns FW's pd_list structure
* @instance: Adapter soft state
* @pd_list: pd_list structure
@@ -3906,42 +4081,72 @@ megasas_get_pd_list(struct megasas_instance *instance)
if (instance->ctrl_context && !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- /*
- * the following function will get the instance PD LIST.
- */
+ switch (ret) {
+ case DCMD_FAILED:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case DCMD_TIMEOUT:
+
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ /*
+ * DCMD timed out from the AEN path.
+ * The AEN path already holds reset_mutex to avoid PCI access
+ * while OCR is in progress.
+ */
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
- pd_addr = ci->addr;
+ break;
+
+ case DCMD_SUCCESS:
+ pd_addr = ci->addr;
- if (ret == 0 &&
- (le32_to_cpu(ci->count) <
- (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+ if ((le32_to_cpu(ci->count) >
+ (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
+ break;
memset(instance->local_pd_list, 0,
- MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+ MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
-
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
- le16_to_cpu(pd_addr->deviceId);
+ le16_to_cpu(pd_addr->deviceId);
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
- pd_addr->scsiDevType;
+ pd_addr->scsiDevType;
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
- MR_PD_STATE_SYSTEM;
+ MR_PD_STATE_SYSTEM;
pd_addr++;
}
+
memcpy(instance->pd_list, instance->local_pd_list,
sizeof(instance->pd_list));
+ break;
+
}
pci_free_consistent(instance->pdev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4002,33 +4207,63 @@ megasas_get_ld_list(struct megasas_instance *instance)
if (instance->ctrl_context && !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
-
ld_count = le32_to_cpu(ci->ldCount);
- /* the following function will get the instance PD LIST */
+ switch (ret) {
+ case DCMD_FAILED:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case DCMD_TIMEOUT:
+
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ /*
+ * DCMD timed out from the AEN path.
+ * The AEN path already holds reset_mutex to avoid PCI access
+ * while OCR is in progress.
+ */
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+
+ break;
+
+ case DCMD_SUCCESS:
+ if (ld_count > instance->fw_supported_vd_count)
+ break;
- if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_count; ld_index++) {
if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId;
- instance->ld_ids[ids] =
- ci->ldList[ld_index].ref.targetId;
+ instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
}
}
+
+ break;
}
- pci_free_consistent(instance->pdev,
- sizeof(struct MR_LD_LIST),
- ci,
- ci_h);
+ pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
- megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4090,26 +4325,61 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->pad_0 = 0;
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- tgtid_count = le32_to_cpu(ci->count);
+ switch (ret) {
+ case DCMD_FAILED:
+ dev_info(&instance->pdev->dev,
+ "DCMD not supported by firmware - %s %d\n",
+ __func__, __LINE__);
+ ret = megasas_get_ld_list(instance);
+ break;
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ /*
+ * DCMD timed out from the AEN path.
+ * The AEN path already holds reset_mutex to avoid PCI access
+ * while OCR is in progress.
+ */
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+
+ break;
+ case DCMD_SUCCESS:
+ tgtid_count = le32_to_cpu(ci->count);
+
+ if ((tgtid_count > (instance->fw_supported_vd_count)))
+ break;
- if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
ids = ci->targetId[ld_index];
instance->ld_ids[ids] = ci->targetId[ld_index];
}
+ break;
}
pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
- ci, ci_h);
+ ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4223,38 +4493,73 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
dcmd->mbox.b[0] = 1;
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- if (!ret) {
+ switch (ret) {
+ case DCMD_SUCCESS:
memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+ /* Save required controller information in
+ * CPU endianness format.
+ */
le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+
+ /* Update the latest Ext VD info.
+ * From Init path, store current firmware details.
+		 * From OCR path, detect any firmware property changes,
+		 * in case of firmware upgrade without system reboot.
+ */
megasas_update_ext_vd_details(instance);
instance->use_seqnum_jbod_fp =
ctrl_info->adapterOperations3.useSeqNumJbodFP;
+
+		/* Check whether controller is iMR or MR */
instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
dev_info(&instance->pdev->dev,
- "controller type\t: %s(%dMB)\n",
- instance->is_imr ? "iMR" : "MR",
- le16_to_cpu(ctrl_info->memory_size));
+ "controller type\t: %s(%dMB)\n",
+ instance->is_imr ? "iMR" : "MR",
+ le16_to_cpu(ctrl_info->memory_size));
+
instance->disableOnlineCtrlReset =
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
- dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
- instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
instance->secure_jbod_support =
ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
+ instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
instance->secure_jbod_support ? "Yes" : "No");
+ break;
+
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+		}
+		break;
+
+	case DCMD_FAILED:
+ megaraid_sas_kill_hba(instance);
+ break;
+
}
pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
ci, ci_h);
megasas_return_cmd(instance, cmd);
+
return ret;
}
@@ -4304,12 +4609,28 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- megasas_return_cmd(instance, cmd);
+ if (ret == DCMD_TIMEOUT) {
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+	} else {
+		megasas_return_cmd(instance, cmd);
+	}
+
return ret;
}
@@ -4426,6 +4747,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
}
+ instance->cur_can_queue = instance->max_scsi_cmds;
/*
* Create a pool of commands
*/
@@ -4756,6 +5078,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msix_vectors = ((scratch_pad_2
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+ if (rdpq_enable)
+ instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+ 1 : 0;
fw_msix_count = instance->msix_vectors;
/* Save 1-15 reply post index address to local memory
* Index 0 is already saved from reg offset
@@ -4792,6 +5117,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
dev_info(&instance->pdev->dev,
"current msix/online cpus\t: (%d/%d)\n",
instance->msix_vectors, (unsigned int)num_online_cpus());
+ dev_info(&instance->pdev->dev,
+ "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
@@ -4856,7 +5183,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
- instance->mpio = ctrl_info->adapterOperations2.mpio;
+ instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
+ instance->passive = ctrl_info->cluster.passive;
+ memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
instance->UnevenSpanSupport =
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
@@ -4932,6 +5261,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->throttlequeuedepth =
MEGASAS_THROTTLE_QUEUE_DEPTH;
+ if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
+ resetwaittime = MEGASAS_RESET_WAIT_TIME;
+
+ if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
+ scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
/* Launch SR-IOV heartbeat timer */
if (instance->requestorId) {
@@ -5035,10 +5369,8 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
- if (megasas_issue_blocked_cmd(instance, cmd, 30))
- dev_err(&instance->pdev->dev, "Command timedout"
- "from %s\n", __func__);
- else {
+ if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
+ DCMD_SUCCESS) {
/*
* Copy the data back into callers buffer
*/
@@ -5047,7 +5379,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
eli->clear_seq_num = el_info->clear_seq_num;
eli->shutdown_seq_num = el_info->shutdown_seq_num;
eli->boot_seq_num = el_info->boot_seq_num;
- }
+	} else {
+		dev_err(&instance->pdev->dev, "DCMD failed from %s\n",
+			__func__);
+	}
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
@@ -5262,6 +5596,8 @@ static int megasas_io_attach(struct megasas_instance *instance)
if (instance->ctrl_context) {
host->hostt->eh_device_reset_handler = NULL;
host->hostt->eh_bus_reset_handler = NULL;
+ host->hostt->eh_target_reset_handler = megasas_reset_target_fusion;
+ host->hostt->eh_abort_handler = megasas_task_abort_fusion;
}
/*
@@ -5447,7 +5783,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->flag_ieee = 0;
instance->ev = NULL;
instance->issuepend_done = 1;
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
instance->is_imr = 0;
instance->evt_detail = pci_alloc_consistent(pdev,
@@ -5461,6 +5797,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
goto fail_alloc_dma_buf;
}
+ instance->pd_info = pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+
+ if (!instance->pd_info)
+ dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+
/*
* Initialize locks and queues
*/
@@ -5476,8 +5818,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->aen_mutex);
mutex_init(&instance->reset_mutex);
+ mutex_init(&instance->hba_mutex);
/*
* Initialize PCI related and misc parameters
@@ -5592,6 +5934,10 @@ fail_alloc_dma_buf:
instance->evt_detail,
instance->evt_detail_h);
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info,
+ instance->pd_info_h);
if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
@@ -5616,7 +5962,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
cmd = megasas_get_cmd(instance);
@@ -5638,9 +5984,12 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
- if (megasas_issue_blocked_cmd(instance, cmd, 30))
- dev_err(&instance->pdev->dev, "Command timedout"
- " from %s\n", __func__);
+ if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+ != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "return from %s %d\n", __func__, __LINE__);
+ return;
+ }
megasas_return_cmd(instance, cmd);
}
@@ -5656,7 +6005,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
cmd = megasas_get_cmd(instance);
@@ -5666,13 +6015,13 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
if (instance->aen_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->aen_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+ instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
if (instance->map_update_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->map_update_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+ instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
if (instance->jbod_seq_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->jbod_seq_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+ instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
dcmd = &cmd->frame->dcmd;
@@ -5687,9 +6036,12 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->data_xfer_len = 0;
dcmd->opcode = cpu_to_le32(opcode);
- if (megasas_issue_blocked_cmd(instance, cmd, 30))
- dev_err(&instance->pdev->dev, "Command timedout"
- "from %s\n", __func__);
+ if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+ != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "return from %s %d\n", __func__, __LINE__);
+ return;
+ }
megasas_return_cmd(instance, cmd);
}
@@ -5847,6 +6199,10 @@ fail_init_mfi:
instance->evt_detail,
instance->evt_detail_h);
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info,
+ instance->pd_info_h);
if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
@@ -5941,11 +6297,11 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (fusion->ld_drv_map[i])
free_pages((ulong)fusion->ld_drv_map[i],
fusion->drv_map_pages);
- if (fusion->pd_seq_sync)
- dma_free_coherent(&instance->pdev->dev,
- pd_seq_map_sz,
- fusion->pd_seq_sync[i],
- fusion->pd_seq_phys[i]);
+ if (fusion->pd_seq_sync[i])
+ dma_free_coherent(&instance->pdev->dev,
+ pd_seq_map_sz,
+ fusion->pd_seq_sync[i],
+ fusion->pd_seq_phys[i]);
}
free_pages((ulong)instance->ctrl_context,
instance->ctrl_context_pages);
@@ -5965,6 +6321,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail, instance->evt_detail_h);
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info,
+ instance->pd_info_h);
if (instance->vf_affiliation)
pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6090,7 +6450,7 @@ static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
for (i = 0; i < megasas_mgmt_info.max_index; i++) {
local_instance = megasas_mgmt_info.instance[i];
if (local_instance && local_instance->crash_dump_drv_support) {
- if ((local_instance->adprecovery ==
+ if ((atomic_read(&local_instance->adprecovery) ==
MEGASAS_HBA_OPERATIONAL) &&
!megasas_set_crash_dump_params(local_instance,
crash_support)) {
@@ -6227,7 +6587,15 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- megasas_issue_blocked_cmd(instance, cmd, 0);
+ if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
+ cmd->sync_cmd = 0;
+ dev_err(&instance->pdev->dev,
+ "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+ __func__, __LINE__, cmd->frame->dcmd.opcode,
+ cmd->cmd_status_drv);
+ return -EBUSY;
+ }
+
cmd->sync_cmd = 0;
if (instance->unload == 1) {
@@ -6330,7 +6698,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Controller in crit error\n");
error = -ENODEV;
goto out_kfree_ioc;
@@ -6349,7 +6717,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
for (i = 0; i < wait_time; i++) {
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
break;
}
@@ -6364,7 +6732,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
}
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
dev_err(&instance->pdev->dev, "timed out while"
@@ -6406,7 +6774,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
if (!instance)
return -ENODEV;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
return -ENODEV;
}
@@ -6417,7 +6785,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
for (i = 0; i < wait_time; i++) {
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock,
flags);
break;
@@ -6434,7 +6802,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
}
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
dev_err(&instance->pdev->dev, "timed out while waiting"
"for HBA to recover\n");
@@ -6442,10 +6810,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
- mutex_lock(&instance->aen_mutex);
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
aen.class_locale_word);
- mutex_unlock(&instance->aen_mutex);
+ mutex_unlock(&instance->reset_mutex);
return error;
}
@@ -6476,9 +6844,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
int i;
int error = 0;
compat_uptr_t ptr;
- unsigned long local_raw_ptr;
u32 local_sense_off;
u32 local_sense_len;
+ u32 user_sense_off;
if (clear_user(ioc, sizeof(*ioc)))
return -EFAULT;
@@ -6496,17 +6864,16 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
* sense_len is not null, so prepare the 64bit value under
* the same condition.
*/
- if (get_user(local_raw_ptr, ioc->frame.raw) ||
- get_user(local_sense_off, &ioc->sense_off) ||
- get_user(local_sense_len, &ioc->sense_len))
+ if (get_user(local_sense_off, &ioc->sense_off) ||
+ get_user(local_sense_len, &ioc->sense_len) ||
+ get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
-
if (local_sense_len) {
void __user **sense_ioc_ptr =
- (void __user **)((u8*)local_raw_ptr + local_sense_off);
+ (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
compat_uptr_t *sense_cioc_ptr =
- (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
if (get_user(ptr, sense_cioc_ptr) ||
put_user(compat_ptr(ptr), sense_ioc_ptr))
return -EFAULT;
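
The compat fix above stops treating bytes read out of the user frame as a kernel-usable pointer; instead it computes the user-space address of the sense-pointer field (frame.raw plus the sense offset) and widens the 32-bit compat pointer into the native frame's 64-bit slot. A user-space sketch of that widening step, with an illustrative layout and offset:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Raw frame bytes as the two ioctl layouts see them. */
	uint8_t frame32[64] = {0}, frame64[64] = {0};
	uint32_t sense_off = 16;	/* illustrative offset of the field */

	/* A 32-bit caller stored a compat (32-bit) pointer value. */
	uint32_t compat_val = 0x12345678u;
	memcpy(frame32 + sense_off, &compat_val, sizeof(compat_val));

	/* get_user() equivalent: read the 32-bit pointer value back out. */
	uint32_t ptr;
	memcpy(&ptr, frame32 + sense_off, sizeof(ptr));

	/* put_user(compat_ptr(ptr)) equivalent: widen it and store it in
	 * the native frame's 64-bit slot at the same offset. */
	uint64_t native_ptr = ptr;
	memcpy(frame64 + sense_off, &native_ptr, sizeof(native_ptr));

	printf("widened sense ptr: 0x%llx\n", (unsigned long long)native_ptr);
	return 0;
}
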
@@ -6647,6 +7014,7 @@ megasas_aen_polling(struct work_struct *work)
int i, j, doscan = 0;
u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
int error;
+ u8 dcmd_ret = DCMD_SUCCESS;
if (!instance) {
printk(KERN_ERR "invalid instance!\n");
@@ -6659,16 +7027,7 @@ megasas_aen_polling(struct work_struct *work)
wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
/* Don't run the event workqueue thread if OCR is running */
- for (i = 0; i < wait_time; i++) {
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
- break;
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "%s waiting for "
- "controller reset to finish for scsi%d\n",
- __func__, instance->host->host_no);
- }
- msleep(1000);
- }
+ mutex_lock(&instance->reset_mutex);
instance->ev = NULL;
host = instance->host;
@@ -6676,212 +7035,127 @@ megasas_aen_polling(struct work_struct *work)
megasas_decode_evt(instance);
switch (le32_to_cpu(instance->evt_detail->code)) {
- case MR_EVT_PD_INSERTED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (!sdev1)
- scsi_add_device(host, i, j, 0);
-
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- break;
+ case MR_EVT_PD_INSERTED:
case MR_EVT_PD_REMOVED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- }
- doscan = 0;
+ dcmd_ret = megasas_get_pd_list(instance);
+ if (dcmd_ret == DCMD_SUCCESS)
+ doscan = SCAN_PD_CHANNEL;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- }
- break;
case MR_EVT_LD_CREATED:
if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- doscan = 0;
- }
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret == DCMD_SUCCESS)
+ doscan = SCAN_VD_CHANNEL;
+
break;
+
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
- doscan = 1;
+ dcmd_ret = megasas_get_pd_list(instance);
+
+ if (dcmd_ret != DCMD_SUCCESS)
+ break;
+
+ if (!instance->requestorId ||
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret != DCMD_SUCCESS)
+ break;
+
+ doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
+ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+ instance->host->host_no);
break;
+
case MR_EVT_CTRL_PROP_CHANGED:
- megasas_get_ctrl_info(instance);
- break;
+ dcmd_ret = megasas_get_ctrl_info(instance);
+ break;
default:
doscan = 0;
break;
}
} else {
dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
return;
}
- if (doscan) {
- dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
- instance->host->host_no);
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
- sdev1 = scsi_device_lookup(host, i, j, 0);
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- if (!sdev1) {
- scsi_add_device(host, i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ mutex_unlock(&instance->reset_mutex);
+
+ if (doscan & SCAN_PD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1)
+ scsi_add_device(host, i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
+ }
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- else
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ if (doscan & SCAN_VD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index] != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
}
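
doscan is now a bitmask, so one AEN can request the physical-drive scan, the virtual-drive scan, or both, and the two loops above run independently. A sketch of the flag handling; the SCAN_* values are assumed here, since the patch defines them in a header not shown in this hunk:

#include <stdio.h>

/* Assumed flag values; the real definitions live in megaraid_sas.h. */
#define SCAN_PD_CHANNEL 0x1
#define SCAN_VD_CHANNEL 0x2

static void scan(unsigned int doscan)
{
	if (doscan & SCAN_PD_CHANNEL)
		puts("rescan physical-drive channels");
	if (doscan & SCAN_VD_CHANNEL)
		puts("rescan virtual-drive channels");
}

int main(void)
{
	scan(SCAN_PD_CHANNEL);			 /* PD inserted/removed */
	scan(SCAN_VD_CHANNEL);			 /* LD created/deleted  */
	scan(SCAN_PD_CHANNEL | SCAN_VD_CHANNEL); /* host bus scan event */
	return 0;
}
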
- if (instance->aen_cmd != NULL) {
- kfree(ev);
- return ;
- }
-
- seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ if (dcmd_ret == DCMD_SUCCESS)
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ else
+ seq_num = instance->last_seq_num;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
- mutex_lock(&instance->aen_mutex);
+
+ if (instance->aen_cmd != NULL) {
+ kfree(ev);
+ return;
+ }
+
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, seq_num,
class_locale.word);
- mutex_unlock(&instance->aen_mutex);
-
if (error)
- dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
+ dev_err(&instance->pdev->dev,
+ "register aen failed error %x\n", error);
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 741509b35617..e413113c86ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1020,6 +1020,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
/* assume this IO needs the full row - we'll adjust if not true */
regSize = stripSize;
+ io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;
+
/* Check if we can send this I/O via FastPath */
if (raid->capability.fpCapable) {
if (isRead)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 8d630a552b07..98a848bdfdc2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -91,7 +91,10 @@ void megasas_start_timer(struct megasas_instance *instance,
struct timer_list *timer,
void *fn, unsigned long interval);
extern struct megasas_mgmt_info megasas_mgmt_info;
-extern int resetwaittime;
+extern unsigned int resetwaittime;
+extern unsigned int dual_qdepth_disable;
+static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
+static void megasas_free_reply_fusion(struct megasas_instance *instance);
@@ -201,58 +204,72 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
-
/**
- * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
- * @instance: Adapter soft state
+ * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
+ * @instance: Adapter soft state
+ * @fw_boot_context:	Whether this function is called during probe or after OCR
+ *
+ * This function is only for fusion controllers.
+ * Update the host can_queue if the firmware has downgraded the maximum
+ * number of supported commands. The firmware upgrade case is skipped,
+ * because the underlying firmware then has more resources than it
+ * exposes to the OS.
+ *
*/
-static void megasas_teardown_frame_pool_fusion(
- struct megasas_instance *instance)
+static void
+megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
{
- int i;
- struct fusion_context *fusion = instance->ctrl_context;
-
- u16 max_cmd = instance->max_fw_cmds;
+ u16 cur_max_fw_cmds = 0;
+ u16 ldio_threshold = 0;
+ struct megasas_register_set __iomem *reg_set;
- struct megasas_cmd_fusion *cmd;
+ reg_set = instance->reg_set;
- if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
- dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
- "sense pool : %p\n", fusion->sg_dma_pool,
- fusion->sense_dma_pool);
- return;
- }
+ cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
- /*
- * Return all frames to pool
- */
- for (i = 0; i < max_cmd; i++) {
+ if (dual_qdepth_disable || !cur_max_fw_cmds)
+ cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ else
+ ldio_threshold =
+ (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
+
+ dev_info(&instance->pdev->dev,
+ "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
+ cur_max_fw_cmds, ldio_threshold);
+
+ if (fw_boot_context == OCR_CONTEXT) {
+ cur_max_fw_cmds = cur_max_fw_cmds - 1;
+ if (cur_max_fw_cmds <= instance->max_fw_cmds) {
+ instance->cur_can_queue =
+ cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->host->can_queue = instance->cur_can_queue;
+ instance->ldio_threshold = ldio_threshold;
+ }
+ } else {
+ instance->max_fw_cmds = cur_max_fw_cmds;
+ instance->ldio_threshold = ldio_threshold;
- cmd = fusion->cmd_list[i];
+ if (!instance->is_rdpq)
+ instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
- if (cmd->sg_frame)
- pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
- cmd->sg_frame_phys_addr);
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+		instance->max_fw_cmds = instance->max_fw_cmds - 1;
- if (cmd->sense)
- pci_pool_free(fusion->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->cur_can_queue = instance->max_scsi_cmds;
}
-
- /*
- * Now destroy the pool itself
- */
- pci_pool_destroy(fusion->sg_dma_pool);
- pci_pool_destroy(fusion->sense_dma_pool);
-
- fusion->sg_dma_pool = NULL;
- fusion->sense_dma_pool = NULL;
}
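
megasas_fusion_update_can_queue() above turns a firmware command count read from a scratch pad register into the host's can_queue. The same arithmetic as a stand-alone sketch; the register value and the reservation counts are illustrative, not the driver's real numbers:

#include <stdio.h>
#include <stdint.h>

/* Illustrative reservation counts; the driver's real values are the
 * MEGASAS_FUSION_INTERNAL_CMDS and MEGASAS_FUSION_IOCTL_CMDS macros. */
#define INTERNAL_CMDS 8
#define IOCTL_CMDS    3

int main(void)
{
	/* The low 16 bits of the scratch pad register carry the firmware's
	 * supported command count, as read in the patch above. */
	uint32_t scratch_pad = 0x00000400;	/* sample register value */
	unsigned int max_fw_cmds = scratch_pad & 0x00FFFF;

	/* Reserve one slot so reply_q_sz (one more than the commands the
	 * driver may send) never exceeds what the firmware supports. */
	max_fw_cmds -= 1;

	/* The SCSI mid-layer gets what is left after driver-internal and
	 * ioctl frames are set aside. */
	unsigned int max_scsi_cmds = max_fw_cmds - (INTERNAL_CMDS + IOCTL_CMDS);

	printf("fw cmds: %u, host can_queue: %u\n", max_fw_cmds, max_scsi_cmds);
	return 0;
}
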
-
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
* @instance: Adapter soft state
@@ -262,55 +279,65 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
{
int i;
struct fusion_context *fusion = instance->ctrl_context;
+ struct megasas_cmd_fusion *cmd;
- u32 max_cmds, req_sz, reply_sz, io_frames_sz;
+ /* SG, Sense */
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ if (cmd) {
+ if (cmd->sg_frame)
+ pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+ if (cmd->sense)
+ pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+ }
+ if (fusion->sg_dma_pool) {
+ pci_pool_destroy(fusion->sg_dma_pool);
+ fusion->sg_dma_pool = NULL;
+ }
+ if (fusion->sense_dma_pool) {
+ pci_pool_destroy(fusion->sense_dma_pool);
+ fusion->sense_dma_pool = NULL;
+ }
- req_sz = fusion->request_alloc_sz;
- reply_sz = fusion->reply_alloc_sz;
- io_frames_sz = fusion->io_frames_alloc_sz;
- max_cmds = instance->max_fw_cmds;
+	/* Reply Frame, Desc */
+ if (instance->is_rdpq)
+ megasas_free_rdpq_fusion(instance);
+ else
+ megasas_free_reply_fusion(instance);
- /* Free descriptors and request Frames memory */
+	/* Request Frame, Desc */
if (fusion->req_frames_desc)
- dma_free_coherent(&instance->pdev->dev, req_sz,
- fusion->req_frames_desc,
- fusion->req_frames_desc_phys);
-
- if (fusion->reply_frames_desc) {
- pci_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc,
- fusion->reply_frames_desc_phys);
- pci_pool_destroy(fusion->reply_frames_desc_pool);
- }
-
- if (fusion->io_request_frames) {
+ dma_free_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz, fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+ if (fusion->io_request_frames)
pci_pool_free(fusion->io_request_frames_pool,
- fusion->io_request_frames,
- fusion->io_request_frames_phys);
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ if (fusion->io_request_frames_pool) {
pci_pool_destroy(fusion->io_request_frames_pool);
+ fusion->io_request_frames_pool = NULL;
}
- /* Free the Fusion frame pool */
- megasas_teardown_frame_pool_fusion(instance);
- /* Free all the commands in the cmd_list */
- for (i = 0; i < max_cmds; i++)
+ /* cmd_list */
+ for (i = 0; i < instance->max_fw_cmds; i++)
kfree(fusion->cmd_list[i]);
- /* Free the cmd_list buffer itself */
kfree(fusion->cmd_list);
- fusion->cmd_list = NULL;
-
}
/**
- * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
+ * megasas_create_sg_sense_fusion - Creates DMA pools for the SG and sense frames
* @instance: Adapter soft state
*
*/
-static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
+static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{
int i;
u32 max_cmd;
@@ -321,25 +348,17 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
max_cmd = instance->max_fw_cmds;
- /*
- * Use DMA pool facility provided by PCI layer
- */
-
- fusion->sg_dma_pool = pci_pool_create("sg_pool_fusion", instance->pdev,
- instance->max_chain_frame_sz,
- 4, 0);
- if (!fusion->sg_dma_pool) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
- return -ENOMEM;
- }
- fusion->sense_dma_pool = pci_pool_create("sense pool fusion",
- instance->pdev,
- SCSI_SENSE_BUFFERSIZE, 64, 0);
+ fusion->sg_dma_pool =
+ pci_pool_create("mr_sg", instance->pdev,
+ instance->max_chain_frame_sz, 4, 0);
+ /* SCSI_SENSE_BUFFERSIZE = 96 bytes */
+ fusion->sense_dma_pool =
+ pci_pool_create("mr_sense", instance->pdev,
+ SCSI_SENSE_BUFFERSIZE, 64, 0);
- if (!fusion->sense_dma_pool) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
- pci_pool_destroy(fusion->sg_dma_pool);
- fusion->sg_dma_pool = NULL;
+ if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
@@ -347,160 +366,280 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
* Allocate and attach a frame to each of the commands in cmd_list
*/
for (i = 0; i < max_cmd; i++) {
-
cmd = fusion->cmd_list[i];
-
cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
- GFP_KERNEL,
- &cmd->sg_frame_phys_addr);
+ GFP_KERNEL, &cmd->sg_frame_phys_addr);
cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- /*
- * megasas_teardown_frame_pool_fusion() takes care of freeing
- * whatever has been allocated
- */
+ GFP_KERNEL, &cmd->sense_phys_addr);
if (!cmd->sg_frame || !cmd->sense) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
- megasas_teardown_frame_pool_fusion(instance);
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
}
return 0;
}
-/**
- * megasas_alloc_cmds_fusion - Allocates the command packets
- * @instance: Adapter soft state
- *
- *
- * Each frame has a 32-bit field called context. This context is used to get
- * back the megasas_cmd_fusion from the frame when a frame gets completed
- * In this driver, the 32 bit values are the indices into an array cmd_list.
- * This array is used only to look up the megasas_cmd_fusion given the context.
- * The free commands themselves are maintained in a linked list called cmd_pool.
- *
- * cmds are formed in the io_request and sg_frame members of the
- * megasas_cmd_fusion. The context field is used to get a request descriptor
- * and is used as SMID of the cmd.
- * SMID value range is from 1 to max_fw_cmds.
- */
int
-megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
- int i, j, count;
- u32 max_cmd, io_frames_sz;
+ u32 max_cmd, i;
struct fusion_context *fusion;
- struct megasas_cmd_fusion *cmd;
- union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
- u32 offset;
- dma_addr_t io_req_base_phys;
- u8 *io_req_base;
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
+ /*
+ * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
+ GFP_KERNEL);
+ if (!fusion->cmd_list) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
+ GFP_KERNEL);
+ if (!fusion->cmd_list[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+int
+megasas_alloc_request_fusion(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
fusion->req_frames_desc =
dma_alloc_coherent(&instance->pdev->dev,
- fusion->request_alloc_sz,
- &fusion->req_frames_desc_phys, GFP_KERNEL);
-
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
if (!fusion->req_frames_desc) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "request_frames\n");
- goto fail_req_desc;
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
+ fusion->io_request_frames_pool =
+ pci_pool_create("mr_ioreq", instance->pdev,
+ fusion->io_frames_alloc_sz, 16, 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ pci_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL, &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int
+megasas_alloc_reply_fusion(struct megasas_instance *instance)
+{
+ int i, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ fusion = instance->ctrl_context;
+
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
fusion->reply_frames_desc_pool =
- pci_pool_create("reply_frames pool", instance->pdev,
+ pci_pool_create("mr_reply", instance->pdev,
fusion->reply_alloc_sz * count, 16, 0);
if (!fusion->reply_frames_desc_pool) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "reply_frame pool\n");
- goto fail_reply_desc;
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
- fusion->reply_frames_desc =
- pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
- &fusion->reply_frames_desc_phys);
- if (!fusion->reply_frames_desc) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "reply_frame pool\n");
- pci_pool_destroy(fusion->reply_frames_desc_pool);
- goto fail_reply_desc;
+ fusion->reply_frames_desc[0] =
+ pci_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
-
- reply_desc = fusion->reply_frames_desc;
+ reply_desc = fusion->reply_frames_desc[0];
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
- io_frames_sz = fusion->io_frames_alloc_sz;
+	/* This is not RDPQ mode, but the driver still populates the
+	 * reply_frames_desc array so the ISR path can use the same MSI-x index.
+	 */
+ for (i = 0; i < (count - 1); i++)
+ fusion->reply_frames_desc[i + 1] =
+ fusion->reply_frames_desc[i] +
+ (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
+
+ return 0;
+}
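
The loop just above carves one contiguous reply-frame allocation into per-MSI-x base pointers by stepping in units of reply_alloc_sz, expressed in descriptor-sized elements. A small sketch of that pointer arithmetic with made-up sizes:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Stand-in for union MPI2_REPLY_DESCRIPTORS_UNION (really 8 bytes). */
typedef struct { uint64_t Words; } reply_desc_t;

int main(void)
{
	int count = 4;					 /* assumed MSI-x vectors */
	size_t reply_alloc_sz = 64 * sizeof(reply_desc_t); /* bytes per queue */
	reply_desc_t *frames[4];

	/* One allocation backs every queue, as in the non-RDPQ path. */
	frames[0] = calloc(count, reply_alloc_sz);
	if (!frames[0])
		return 1;

	/* Each subsequent queue starts reply_alloc_sz bytes further in,
	 * expressed in descriptor units exactly as the patch does. */
	for (int i = 0; i < count - 1; i++)
		frames[i + 1] = frames[i] +
				reply_alloc_sz / sizeof(reply_desc_t);

	for (int i = 0; i < count; i++)
		printf("queue %d base offset: %td bytes\n", i,
		       (char *)frames[i] - (char *)frames[0]);
	free(frames[0]);
	return 0;
}
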
- fusion->io_request_frames_pool =
- pci_pool_create("io_request_frames pool", instance->pdev,
- fusion->io_frames_alloc_sz, 16, 0);
+int
+megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
+{
+ int i, j, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
- if (!fusion->io_request_frames_pool) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "io_request_frame pool\n");
- goto fail_io_frames;
+ fusion = instance->ctrl_context;
+
+ fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+ &fusion->rdpq_phys);
+ if (!fusion->rdpq_virt) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
- fusion->io_request_frames =
- pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
- &fusion->io_request_frames_phys);
- if (!fusion->io_request_frames) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "io_request_frames frames\n");
- pci_pool_destroy(fusion->io_request_frames_pool);
- goto fail_io_frames;
+ memset(fusion->rdpq_virt, 0,
+ sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
+ instance->pdev, fusion->reply_alloc_sz, 16, 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
- /*
- * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
- * Allocate the dynamic array first and then allocate individual
- * commands.
- */
- fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
- * max_cmd, GFP_KERNEL);
+ for (i = 0; i < count; i++) {
+ fusion->reply_frames_desc[i] =
+ pci_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
+ if (!fusion->reply_frames_desc[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
- if (!fusion->cmd_list) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
- "memory for cmd_list_fusion\n");
- goto fail_cmd_list;
+ fusion->rdpq_virt[i].RDPQBaseAddress =
+ fusion->reply_frames_desc_phys[i];
+
+ reply_desc = fusion->reply_frames_desc[i];
+ for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
}
+ return 0;
+}
- max_cmd = instance->max_fw_cmds;
- for (i = 0; i < max_cmd; i++) {
- fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
- GFP_KERNEL);
- if (!fusion->cmd_list[i]) {
- dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
+static void
+megasas_free_rdpq_fusion(struct megasas_instance *instance)
+{
- for (j = 0; j < i; j++)
- kfree(fusion->cmd_list[j]);
+ int i;
+ struct fusion_context *fusion;
- kfree(fusion->cmd_list);
- fusion->cmd_list = NULL;
- goto fail_cmd_list;
- }
+ fusion = instance->ctrl_context;
+
+ for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
+ if (fusion->reply_frames_desc[i])
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[i],
+ fusion->reply_frames_desc_phys[i]);
}
- /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */
- io_req_base = fusion->io_request_frames +
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
- io_req_base_phys = fusion->io_request_frames_phys +
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ if (fusion->reply_frames_desc_pool)
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+ if (fusion->rdpq_virt)
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+ fusion->rdpq_virt, fusion->rdpq_phys);
+}
+
+static void
+megasas_free_reply_fusion(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion->reply_frames_desc[0])
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[0],
+ fusion->reply_frames_desc_phys[0]);
+
+ if (fusion->reply_frames_desc_pool)
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+}
+
+/**
+ * megasas_alloc_cmds_fusion - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed
+ * In this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * cmds are formed in the io_request and sg_frame members of the
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as SMID of the cmd.
+ * SMID value range is from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ u32 offset;
+ dma_addr_t io_req_base_phys;
+ u8 *io_req_base;
+
+ fusion = instance->ctrl_context;
+
+ if (megasas_alloc_cmdlist_fusion(instance))
+ goto fail_exit;
+
+ if (megasas_alloc_request_fusion(instance))
+ goto fail_exit;
+
+ if (instance->is_rdpq) {
+ if (megasas_alloc_rdpq_fusion(instance))
+ goto fail_exit;
+	} else {
+		if (megasas_alloc_reply_fusion(instance))
+			goto fail_exit;
+	}
+
+	/* The first 256 bytes (SMID 0) are not used. Don't add them to the cmd list */
+ io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
/*
* Add all the commands to command pool (fusion->cmd_pool)
*/
/* SMID 0 is reserved. Set SMID/index from 1 */
- for (i = 0; i < max_cmd; i++) {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
cmd = fusion->cmd_list[i];
offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
@@ -518,35 +657,13 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
cmd->io_request_phys_addr = io_req_base_phys + offset;
}
- /*
- * Create a frame pool and assign one frame to each cmd
- */
- if (megasas_create_frame_pool_fusion(instance)) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
- megasas_free_cmds_fusion(instance);
- goto fail_req_desc;
- }
+ if (megasas_create_sg_sense_fusion(instance))
+ goto fail_exit;
return 0;
-fail_cmd_list:
- pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
- fusion->io_request_frames_phys);
- pci_pool_destroy(fusion->io_request_frames_pool);
-fail_io_frames:
- dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
- fusion->reply_frames_desc,
- fusion->reply_frames_desc_phys);
- pci_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc,
- fusion->reply_frames_desc_phys);
- pci_pool_destroy(fusion->reply_frames_desc_pool);
-
-fail_reply_desc:
- dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
- fusion->req_frames_desc,
- fusion->req_frames_desc_phys);
-fail_req_desc:
+fail_exit:
+ megasas_free_cmds_fusion(instance);
return -ENOMEM;
}
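
The allocation path above implements the context/SMID scheme described in the megasas_alloc_cmds_fusion() comment: the 32-bit context stored in each frame is a plain index into cmd_list, and the request descriptor's SMID is that index plus one, since SMID 0 is reserved. A reduced sketch of the lookup:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Reduced stand-in for struct megasas_cmd_fusion. */
struct cmd {
	uint32_t index;	/* position in cmd_list, used as the frame context */
	uint16_t smid;	/* request descriptor SMID = index + 1 (0 reserved) */
};

int main(void)
{
	uint32_t max_cmds = 8;
	struct cmd **cmd_list = calloc(max_cmds, sizeof(*cmd_list));
	if (!cmd_list)
		return 1;

	/* SMID 0 is reserved, so SMIDs run from 1 to max_cmds while the
	 * context values are plain array indices. */
	for (uint32_t i = 0; i < max_cmds; i++) {
		cmd_list[i] = calloc(1, sizeof(struct cmd));
		if (!cmd_list[i])
			return 1;
		cmd_list[i]->index = i;
		cmd_list[i]->smid = (uint16_t)(i + 1);
	}

	/* Completion path: the 32-bit context in a reply maps straight
	 * back to the owning command. */
	uint32_t context = 5;
	printf("context %u -> smid %u\n", (unsigned)context,
	       (unsigned)cmd_list[context]->smid);

	for (uint32_t i = 0; i < max_cmds; i++)
		free(cmd_list[i]);
	free(cmd_list);
	return 0;
}
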
@@ -576,11 +693,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
msleep(20);
}
- if (frame_hdr->cmd_status == 0xff)
- return -ETIME;
-
- return (frame_hdr->cmd_status == MFI_STAT_OK) ?
- 0 : 1;
+ if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
+ return DCMD_TIMEOUT;
+ else if (frame_hdr->cmd_status == MFI_STAT_OK)
+ return DCMD_SUCCESS;
+ else
+ return DCMD_FAILED;
}
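
wait_and_poll() now folds the frame status into the tri-state DCMD return codes used throughout this patch: a status still at the poison value after the wait loop means timeout. Restated as a plain C helper; the numeric values of the DCMD_* codes and the MFI status constants are assumptions for the sketch:

#include <stdio.h>

/* Return codes introduced by this patch; values assumed for the sketch. */
enum { DCMD_SUCCESS = 0, DCMD_TIMEOUT = 1, DCMD_FAILED = 2 };

#define MFI_STAT_OK             0x00
#define MFI_STAT_INVALID_STATUS 0xFF	/* frame never completed */

static int dcmd_result(unsigned char cmd_status)
{
	/* Still at the poison value after the wait loop: the firmware
	 * never wrote a status, so the command timed out. */
	if (cmd_status == MFI_STAT_INVALID_STATUS)
		return DCMD_TIMEOUT;
	return (cmd_status == MFI_STAT_OK) ? DCMD_SUCCESS : DCMD_FAILED;
}

int main(void)
{
	printf("%d %d %d\n", dcmd_result(0x00), dcmd_result(0xFF),
	       dcmd_result(0x2D));
	return 0;
}
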
/**
@@ -593,16 +711,17 @@ int
megasas_ioc_init_fusion(struct megasas_instance *instance)
{
struct megasas_init_frame *init_frame;
- struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
+ struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
dma_addr_t ioc_init_handle;
struct megasas_cmd *cmd;
- u8 ret;
+ u8 ret, cur_rdpq_mode;
struct fusion_context *fusion;
union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
int i;
struct megasas_header *frame_hdr;
const char *sys_info;
MFI_CAPABILITIES *drv_ops;
+ u32 scratch_pad_2;
fusion = instance->ctrl_context;
@@ -614,6 +733,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_get_cmd;
}
+	scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
+
+ cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+
+ if (instance->is_rdpq && !cur_rdpq_mode) {
+		dev_err(&instance->pdev->dev,
+			"Firmware downgrade *NOT SUPPORTED* from RDPQ mode to non-RDPQ mode\n");
+ ret = 1;
+ goto fail_fw_init;
+ }
+
IOCInitMessage =
dma_alloc_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -635,7 +766,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
- IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+ IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
+ cpu_to_le64(fusion->rdpq_phys) :
+ cpu_to_le64(fusion->reply_frames_desc_phys[0]);
+ IOCInitMessage->MsgFlags = instance->is_rdpq ?
+ MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
init_frame = (struct megasas_init_frame *)cmd->frame;
@@ -665,6 +800,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
drv_ops->mfi_capabilities.support_ext_io_size = 1;
+ drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
+ if (!dual_qdepth_disable)
+ drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
+
+ drv_ops->mfi_capabilities.support_qd_throttling = 1;
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -784,7 +924,8 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
/* Below code is only for non pended DCMD */
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd, 60);
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -795,7 +936,10 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
ret = -EINVAL;
}
- if (!ret)
+ if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ megaraid_sas_kill_hba(instance);
+
+ if (ret == DCMD_SUCCESS)
instance->pd_seq_map_id++;
megasas_return_cmd(instance, cmd);
@@ -875,10 +1019,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
if (instance->ctrl_context && !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
+ if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ megaraid_sas_kill_hba(instance);
+
megasas_return_cmd(instance, cmd);
return ret;
@@ -1072,12 +1219,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
reg_set = instance->reg_set;
- /*
- * Get various operational parameters from status register
- */
- instance->max_fw_cmds =
- instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
- instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
+ megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
/*
* Reduce the max supported cmds by 1. This is to ensure that the
@@ -1658,7 +1800,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
local_map_ptr, start_lba_lo);
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (fusion->adapter_type == INVADER_SERIES) {
if (io_request->RaidContext.regLockFlags ==
@@ -1702,8 +1844,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (fusion->adapter_type == INVADER_SERIES) {
- if (io_request->RaidContext.regLockFlags ==
- REGION_TYPE_UNUSED)
+ if (io_info.do_fp_rlbypass ||
+ (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1791,7 +1933,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
/* build request descriptor */
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
cmd->request_desc->SCSIIO.DevHandle = devHandle;
@@ -1897,7 +2039,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
}
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
}
@@ -2035,13 +2177,21 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
fusion = instance->ctrl_context;
+ if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
+ instance->ldio_threshold &&
+ (atomic_inc_return(&instance->ldio_outstanding) >
+ instance->ldio_threshold)) {
+ atomic_dec(&instance->ldio_outstanding);
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
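
The block above throttles read/write LD I/O with an increment-first, back-out-on-overflow counter, which makes the threshold check race-free without a lock; the completion path decrements the counter again. The same inc-then-test pattern in portable C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ldio_outstanding;
static int ldio_threshold = 2;	/* illustrative threshold */

/* Returns 0 if the I/O may proceed, -1 if the caller should requeue
 * (the driver returns SCSI_MLQUEUE_DEVICE_BUSY there). */
static int ldio_try_start(void)
{
	/* Increment first; each racing thread observes a distinct new
	 * value, so the threshold check needs no lock. */
	if (atomic_fetch_add(&ldio_outstanding, 1) + 1 > ldio_threshold) {
		atomic_fetch_sub(&ldio_outstanding, 1);	/* back out */
		return -1;
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("io %d: %s\n", i, ldio_try_start() ? "busy" : "ok");
	return 0;
}
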
cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
index = cmd->index;
req_desc = megasas_get_request_descriptor(instance, index-1);
if (!req_desc)
- return 1;
+ return SCSI_MLQUEUE_HOST_BUSY;
req_desc->Words = 0;
cmd->request_desc = req_desc;
@@ -2050,7 +2200,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
megasas_return_cmd_fusion(instance, cmd);
dev_err(&instance->pdev->dev, "Error building command\n");
cmd->request_desc = NULL;
- return 1;
+ return SCSI_MLQUEUE_HOST_BUSY;
}
req_desc = cmd->request_desc;
@@ -2092,16 +2242,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
struct LD_LOAD_BALANCE_INFO *lbinfo;
int threshold_reply_count = 0;
struct scsi_cmnd *scmd_local = NULL;
+ struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
+ struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
fusion = instance->ctrl_context;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return IRQ_HANDLED;
- desc = fusion->reply_frames_desc;
- desc += ((MSIxIndex * fusion->reply_alloc_sz)/
- sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
- fusion->last_reply_idx[MSIxIndex];
+ desc = fusion->reply_frames_desc[MSIxIndex] +
+ fusion->last_reply_idx[MSIxIndex];
reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
@@ -2133,6 +2283,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
extStatus = scsi_io_req->RaidContext.exStatus;
switch (scsi_io_req->Function) {
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
+ cmd_fusion->io_request;
+ mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
+ &mr_tm_req->TmRequest;
+			dev_dbg(&instance->pdev->dev,
+				"TM completion: type: 0x%x TaskMID: 0x%x\n",
+ mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
+ complete(&cmd_fusion->done);
+ break;
case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
/* Update load balancing info */
device_id = MEGASAS_DEV_INDEX(scmd_local);
@@ -2155,6 +2315,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
map_cmd_status(cmd_fusion, status, extStatus);
scsi_io_req->RaidContext.status = 0;
scsi_io_req->RaidContext.exStatus = 0;
+ if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+ atomic_dec(&instance->ldio_outstanding);
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
scmd_local->scsi_done(scmd_local);
@@ -2186,9 +2348,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
/* Get the next reply descriptor */
if (!fusion->last_reply_idx[MSIxIndex])
- desc = fusion->reply_frames_desc +
- ((MSIxIndex * fusion->reply_alloc_sz)/
- sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
+ desc = fusion->reply_frames_desc[MSIxIndex];
else
desc++;
@@ -2254,7 +2414,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
/* If we have already declared adapter dead, donot complete cmds */
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
return;
}
@@ -2411,7 +2571,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
* @cmd: mfi cmd pointer
*
*/
-void
+int
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
@@ -2419,10 +2579,13 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
req_desc = build_mpt_cmd(instance, cmd);
if (!req_desc) {
- dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
- return;
+ dev_info(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
}
+
megasas_fire_cmd_fusion(instance, req_desc);
+ return DCMD_SUCCESS;
}
/**
@@ -2583,7 +2746,7 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
/* This function waits for outstanding commands on fusion to complete */
int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
- int iotimeout, int *convert)
+ int reason, int *convert)
{
int i, outstanding, retval = 0, hb_seconds_missed = 0;
u32 fw_state;
@@ -2599,14 +2762,22 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
retval = 1;
goto out;
}
+
+ if (reason == MFI_IO_TIMEOUT_OCR) {
+ dev_info(&instance->pdev->dev,
+			"MFI IO timed out, initiating OCR\n");
+ retval = 1;
+ goto out;
+ }
+
/* If SR-IOV VF mode & heartbeat timeout, don't wait */
- if (instance->requestorId && !iotimeout) {
+ if (instance->requestorId && !reason) {
retval = 1;
goto out;
}
/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
- if (instance->requestorId && iotimeout) {
+ if (instance->requestorId && reason) {
if (instance->hb_host_mem->HB.fwCounter !=
instance->hb_host_mem->HB.driverCounter) {
instance->hb_host_mem->HB.driverCounter =
@@ -2655,17 +2826,18 @@ out:
void megasas_reset_reply_desc(struct megasas_instance *instance)
{
- int i, count;
+ int i, j, count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
fusion = instance->ctrl_context;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
- for (i = 0 ; i < count ; i++)
+ for (i = 0 ; i < count ; i++) {
fusion->last_reply_idx[i] = 0;
- reply_desc = fusion->reply_frames_desc;
- for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ reply_desc = fusion->reply_frames_desc[i];
+ for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ }
}
/*
@@ -2680,6 +2852,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u16 smid;
+ bool refire_cmd = false;
fusion = instance->ctrl_context;
@@ -2695,42 +2868,500 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
continue;
req_desc = megasas_get_request_descriptor
(instance, smid - 1);
- if (req_desc && ((cmd_mfi->frame->dcmd.opcode !=
+ refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
(cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))))
+ cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
+ && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+ if (refire_cmd)
megasas_fire_cmd_fusion(instance, req_desc);
else
megasas_return_cmd(instance, cmd_mfi);
}
}
+/*
+ * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
+ * @instance: per adapter struct
+ * @id: the id assigned by the OS
+ * @channel: the channel assigned by the OS
+ *
+ * Returns SUCCESS if no IOs are pending to the SCSI device, else returns FAILED
+ */
+
+static int megasas_track_scsiio(struct megasas_instance *instance,
+ int id, int channel)
+{
+ int i, found = 0;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ fusion = instance->ctrl_context;
+
+ for (i = 0 ; i < instance->max_scsi_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd &&
+ (cmd_fusion->scmd->device->id == id &&
+ cmd_fusion->scmd->device->channel == channel)) {
+ dev_info(&instance->pdev->dev,
+ "SCSI commands pending to target"
+ "channel %d id %d \tSMID: 0x%x\n",
+ channel, id, cmd_fusion->index);
+ scsi_print_command(cmd_fusion->scmd);
+ found = 1;
+ break;
+ }
+ }
+
+ return found ? FAILED : SUCCESS;
+}
+
+/**
+ * megasas_tm_response_code - translation of device response code
+ * @instance: per adapter struct
+ * @mpi_reply: MPI reply returned by firmware
+ *
+ * Return nothing.
+ */
+static void
+megasas_tm_response_code(struct megasas_instance *instance,
+ struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
+{
+ char *desc;
+
+ switch (mpi_reply->ResponseCode) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
+ mpi_reply->ResponseCode, desc);
+ dev_dbg(&instance->pdev->dev,
+ "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
+ " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
+ mpi_reply->TerminationCount, mpi_reply->DevHandle,
+ mpi_reply->Function, mpi_reply->TaskType,
+ mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
+}
+
+/**
+ * megasas_issue_tm - main routine for sending tm requests
+ * @instance: per adapter struct
+ * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @smid_task: smid assigned to the task
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in megaraid_sas_fusion.h)
+ * Context: user
+ *
+ * MegaRaid uses the MPT interface for Task Management requests.
+ * A generic API for sending task management requests to firmware.
+ *
+ * Return SUCCESS or FAILED.
+ */
+static int
+megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
+ uint channel, uint id, u16 smid_task, u8 type)
+{
+ struct MR_TASK_MANAGE_REQUEST *mr_request;
+ struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
+ unsigned long timeleft;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *scsi_lookup;
+ int rc;
+ struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
+
+ fusion = instance->ctrl_context;
+
+ cmd_mfi = megasas_get_cmd(instance);
+
+ if (!cmd_mfi) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cmd_fusion = megasas_get_cmd_fusion(instance,
+ instance->max_scsi_cmds + cmd_mfi->index);
+
+ /* Save the smid. To be used for returning the cmd */
+ cmd_mfi->context.smid = cmd_fusion->index;
+
+ req_desc = megasas_get_request_descriptor(instance,
+ (cmd_fusion->index - 1));
+ if (!req_desc) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ megasas_return_cmd(instance, cmd_mfi);
+ return -ENOMEM;
+ }
+
+ cmd_fusion->request_desc = req_desc;
+ req_desc->Words = 0;
+
+ scsi_lookup = fusion->cmd_list[smid_task - 1];
+
+ mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
+ memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
+ mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(device_handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ mpi_request->LUN[1] = 0;
+
+ req_desc = cmd_fusion->request_desc;
+ req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
+ req_desc->HighPriority.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ req_desc->HighPriority.MSIxIndex = 0;
+ req_desc->HighPriority.LMID = 0;
+ req_desc->HighPriority.Reserved1 = 0;
+
+ if (channel < MEGASAS_MAX_PD_CHANNELS)
+ mr_request->tmReqFlags.isTMForPD = 1;
+ else
+ mr_request->tmReqFlags.isTMForLD = 1;
+
+ init_completion(&cmd_fusion->done);
+ megasas_fire_cmd_fusion(instance, req_desc);
+
+ timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+
+ if (!timeleft) {
+ dev_err(&instance->pdev->dev,
+ "task mgmt type 0x%x timed out\n", type);
+ cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
+ rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ return rc;
+ }
+
+ mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
+ megasas_tm_response_code(instance, mpi_reply);
+
+ megasas_return_cmd(instance, cmd_mfi);
+ rc = SUCCESS;
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ if (scsi_lookup->scmd == NULL)
+ break;
+ else {
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+ megasas_complete_cmd_dpc_fusion
+ ((unsigned long)instance);
+ instance->instancet->enable_intr(instance);
+ if (scsi_lookup->scmd == NULL)
+ break;
+ }
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
+ break;
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+ megasas_complete_cmd_dpc_fusion
+ ((unsigned long)instance);
+ rc = megasas_track_scsiio(instance, id, channel);
+ instance->instancet->enable_intr(instance);
+
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ return rc;
+
+}
+
+/*
+ * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI
+ * @scmd: pointer to scsi command object
+ *
+ * Returns a non-zero SMID index if the command is found outstanding
+ */
+static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
+{
+ int i, ret = 0;
+ struct megasas_instance *instance;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ fusion = instance->ctrl_context;
+
+ for (i = 0; i < instance->max_scsi_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
+ scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
+ " SMID: %d\n", cmd_fusion->index);
+ ret = cmd_fusion->index;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * megasas_get_tm_devhandle - Get devhandle for TM request
+ * @sdev: OS provided scsi device
+ *
+ * Returns: devhandle/targetID of SCSI device
+ */
+static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+ u32 device_id;
+ struct megasas_instance *instance;
+ struct fusion_context *fusion;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+ u16 devhandle = (u16)ULONG_MAX;
+
+ instance = (struct megasas_instance *)sdev->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ if (instance->use_seqnum_jbod_fp) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ pd_sync = (void *)fusion->pd_seq_sync
+ [(instance->pd_seq_map_id - 1) & 1];
+ devhandle = pd_sync->seq[pd_index].devHandle;
+ } else
+ sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable"
+ " without JBOD MAP support from %s %d\n", __func__, __LINE__);
+ } else {
+ device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+ + sdev->id;
+ devhandle = device_id;
+ }
+
+ return devhandle;
+}
+
+/*
+ * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the command is aborted, else FAILED
+ */
+
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ u16 smid, devhandle;
+ struct fusion_context *fusion;
+ int ret;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scmd->device->hostdata;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
+ "SCSI host:%d\n", instance->host->host_no);
+ ret = FAILED;
+ return ret;
+ }
+
+ if (!mr_device_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ ret = SUCCESS;
+ goto out;
+ }
+
+ if (!mr_device_priv_data->is_tm_capable) {
+ ret = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&instance->reset_mutex);
+
+ smid = megasas_fusion_smid_lookup(scmd);
+
+ if (!smid) {
+ ret = SUCCESS;
+ scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
+ " issued is not found in oustanding commands\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+
+ devhandle = megasas_get_tm_devhandle(scmd->device);
+
+ if (devhandle == (u16)ULONG_MAX) {
+ ret = SUCCESS;
+ sdev_printk(KERN_INFO, scmd->device,
+ "task abort issued for invalid devhandle\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+ scmd, devhandle);
+
+ mr_device_priv_data->tm_busy = 1;
+ ret = megasas_issue_tm(instance, devhandle,
+ scmd->device->channel, scmd->device->id, smid,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+ mr_device_priv_data->tm_busy = 0;
+
+ mutex_unlock(&instance->reset_mutex);
+out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return ret;
+}
+
+/*
+ * megasas_reset_target_fusion : target reset function for fusion adapters
+ * @scmd: SCSI command pointer
+ *
+ * Returns SUCCESS if all commands associated with the target are aborted,
+ * else FAILED
+ */
+
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ int ret = FAILED;
+ u16 devhandle;
+ struct fusion_context *fusion;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scmd->device->hostdata;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
+ "SCSI host:%d\n", instance->host->host_no);
+ ret = FAILED;
+ return ret;
+ }
+
+ if (!mr_device_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ ret = SUCCESS;
+ goto out;
+ }
+
+ if (!mr_device_priv_data->is_tm_capable) {
+ ret = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&instance->reset_mutex);
+ devhandle = megasas_get_tm_devhandle(scmd->device);
+
+ if (devhandle == (u16)ULONG_MAX) {
+ ret = SUCCESS;
+ sdev_printk(KERN_INFO, scmd->device,
+ "target reset issued for invalid devhandle\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
+ scmd, devhandle);
+ mr_device_priv_data->tm_busy = 1;
+ ret = megasas_issue_tm(instance, devhandle,
+ scmd->device->channel, scmd->device->id, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+ mr_device_priv_data->tm_busy = 0;
+ mutex_unlock(&instance->reset_mutex);
+out:
+ scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
+ (ret == SUCCESS) ? "SUCCESS" : "FAILED");
+
+ return ret;
+}
+
+/* SR-IOV: get the peer instance in the cluster, if any */
+struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
+{
+ int i;
+
+ for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
+ if (megasas_mgmt_info.instance[i] &&
+ (megasas_mgmt_info.instance[i] != instance) &&
+ megasas_mgmt_info.instance[i]->requestorId &&
+ megasas_mgmt_info.instance[i]->peerIsPresent &&
+ (memcmp((megasas_mgmt_info.instance[i]->clusterId),
+ instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
+ return megasas_mgmt_info.instance[i];
+ }
+ return NULL;
+}
+
/* Check for a second path that is currently UP */
int megasas_check_mpio_paths(struct megasas_instance *instance,
struct scsi_cmnd *scmd)
{
- int i, j, retval = (DID_RESET << 16);
-
- if (instance->mpio && instance->requestorId) {
- for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
- for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
- if (megasas_mgmt_info.instance[i] &&
- (megasas_mgmt_info.instance[i] != instance) &&
- megasas_mgmt_info.instance[i]->mpio &&
- megasas_mgmt_info.instance[i]->requestorId
- &&
- (megasas_mgmt_info.instance[i]->ld_ids[j]
- == scmd->device->id)) {
- retval = (DID_NO_CONNECT << 16);
- goto out;
- }
+ struct megasas_instance *peer_instance = NULL;
+ int retval = (DID_RESET << 16);
+
+ if (instance->peerIsPresent) {
+ peer_instance = megasas_get_peer_instance(instance);
+ if ((peer_instance) &&
+ (atomic_read(&peer_instance->adprecovery) ==
+ MEGASAS_HBA_OPERATIONAL))
+ retval = (DID_NO_CONNECT << 16);
}
-out:
return retval;
}
/* Core fusion reset function */
-int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
{
int retval = SUCCESS, i, convert = 0;
struct megasas_instance *instance;
@@ -2739,13 +3370,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
u32 abs_state, status_reg, reset_adapter;
u32 io_timeout_in_crash_mode = 0;
struct scsi_cmnd *scmd_local = NULL;
+ struct scsi_device *sdev;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
mutex_lock(&instance->reset_mutex);
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_warn(&instance->pdev->dev, "Hardware critical error, "
"returning FAILED for scsi%d.\n",
instance->host->host_no);
@@ -2757,10 +3389,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
/* IO timeout detected, forcibly put FW in FAULT state */
if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
- instance->crash_dump_app_support && iotimeout) {
- dev_info(&instance->pdev->dev, "IO timeout is detected, "
+ instance->crash_dump_app_support && reason) {
+ dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
"forcibly FAULT Firmware\n");
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
status_reg = readl(&instance->reg_set->doorbell);
writel(status_reg | MFI_STATE_FORCE_OCR,
&instance->reg_set->doorbell);
@@ -2772,10 +3404,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
dev_dbg(&instance->pdev->dev, "waiting for [%d] "
"seconds for crash dump collection and OCR "
"to be done\n", (io_timeout_in_crash_mode * 3));
- } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
(io_timeout_in_crash_mode < 80));
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
dev_info(&instance->pdev->dev, "OCR done for IO "
"timeout case\n");
retval = SUCCESS;
@@ -2792,18 +3424,18 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
instance->instancet->disable_intr(instance);
msleep(1000);
/* First try waiting for commands to complete */
- if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+ if (megasas_wait_for_outstanding_fusion(instance, reason,
&convert)) {
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
dev_warn(&instance->pdev->dev, "resetting fusion "
"adapter scsi%d.\n", instance->host->host_no);
if (convert)
- iotimeout = 0;
+ reason = 0;
/* Now return commands back to the OS */
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
@@ -2813,6 +3445,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
scmd_local->result =
megasas_check_mpio_paths(instance,
scmd_local);
+ if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+ atomic_dec(&instance->ldio_outstanding);
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
scmd_local->scsi_done(scmd_local);
@@ -2837,55 +3471,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
}
/* Let SR-IOV VF & PF sync up if there was a HB failure */
- if (instance->requestorId && !iotimeout) {
+ if (instance->requestorId && !reason) {
msleep(MEGASAS_OCR_SETTLE_TIME_VF);
- /* Look for a late HB update after VF settle time */
- if (abs_state == MFI_STATE_OPERATIONAL &&
- (instance->hb_host_mem->HB.fwCounter !=
- instance->hb_host_mem->HB.driverCounter)) {
- instance->hb_host_mem->HB.driverCounter =
- instance->hb_host_mem->HB.fwCounter;
- dev_warn(&instance->pdev->dev, "SR-IOV:"
- "Late FW heartbeat update for "
- "scsi%d.\n",
- instance->host->host_no);
- } else {
- /* In VF mode, first poll for FW ready */
- for (i = 0;
- i < (MEGASAS_RESET_WAIT_TIME * 1000);
- i += 20) {
- status_reg =
- instance->instancet->
- read_fw_status_reg(
- instance->reg_set);
- abs_state = status_reg &
- MFI_STATE_MASK;
- if (abs_state == MFI_STATE_READY) {
- dev_warn(&instance->pdev->dev,
- "SR-IOV: FW was found"
- "to be in ready state "
- "for scsi%d.\n",
- instance->host->host_no);
- break;
- }
- msleep(20);
- }
- if (abs_state != MFI_STATE_READY) {
- dev_warn(&instance->pdev->dev, "SR-IOV: "
- "FW not in ready state after %d"
- " seconds for scsi%d, status_reg = "
- "0x%x.\n",
- MEGASAS_RESET_WAIT_TIME,
- instance->host->host_no,
- status_reg);
- megaraid_sas_kill_hba(instance);
- instance->skip_heartbeat_timer_del = 1;
- instance->adprecovery =
- MEGASAS_HW_CRITICAL_ERROR;
- retval = FAILED;
- goto out;
- }
- }
+ goto transition_to_ready;
}
/* Now try to reset the chip */
@@ -2894,23 +3482,28 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
if (instance->instancet->adp_reset
(instance, instance->reg_set))
continue;
-
+transition_to_ready:
/* Wait for FW to become ready */
if (megasas_transition_to_ready(instance, 1)) {
- dev_warn(&instance->pdev->dev, "Failed to "
- "transition controller to ready "
- "for scsi%d.\n",
- instance->host->host_no);
- continue;
+ dev_warn(&instance->pdev->dev,
+ "Failed to transition controller to ready for "
+ "scsi%d.\n", instance->host->host_no);
+ if (instance->requestorId && !reason)
+ goto fail_kill_adapter;
+ else
+ continue;
}
-
megasas_reset_reply_desc(instance);
+ megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
+
if (megasas_ioc_init_fusion(instance)) {
dev_warn(&instance->pdev->dev,
- "megasas_ioc_init_fusion() failed!"
- " for scsi%d\n",
- instance->host->host_no);
- continue;
+ "megasas_ioc_init_fusion() failed! for "
+ "scsi%d\n", instance->host->host_no);
+ if (instance->requestorId && !reason)
+ goto fail_kill_adapter;
+ else
+ continue;
}
megasas_refire_mgmt_cmd(instance);
@@ -2932,10 +3525,13 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
megasas_setup_jbod_map(instance);
+ shost_for_each_device(sdev, shost)
+ megasas_update_sdev_properties(sdev);
+
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
instance->instancet->enable_intr(instance);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
/* Restart SR-IOV heartbeat */
if (instance->requestorId) {
@@ -2964,6 +3560,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
retval = SUCCESS;
goto out;
}
+fail_kill_adapter:
/* Reset failed, kill the adapter */
dev_warn(&instance->pdev->dev, "Reset failed, killing "
"adapter scsi%d.\n", instance->host->host_no);
@@ -2980,7 +3577,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
}
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
}
out:
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 473005c99b44..80eaee22f5bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -176,7 +176,9 @@ enum REGION_TYPE {
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
-#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
+#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
@@ -277,6 +279,100 @@ union MPI2_SCSI_IO_CDB_UNION {
struct MPI2_SGE_SIMPLE_UNION SGE;
};
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+struct MPI2_SCSI_TASK_MANAGE_REQUEST {
+ u16 DevHandle; /*0x00 */
+ u8 ChainOffset; /*0x02 */
+ u8 Function; /*0x03 */
+ u8 Reserved1; /*0x04 */
+ u8 TaskType; /*0x05 */
+ u8 Reserved2; /*0x06 */
+ u8 MsgFlags; /*0x07 */
+ u8 VP_ID; /*0x08 */
+ u8 VF_ID; /*0x09 */
+ u16 Reserved3; /*0x0A */
+ u8 LUN[8]; /*0x0C */
+ u32 Reserved4[7]; /*0x14 */
+ u16 TaskMID; /*0x30 */
+ u16 Reserved5; /*0x32 */
+};
+
+
+/*SCSI Task Management Reply Message */
+struct MPI2_SCSI_TASK_MANAGE_REPLY {
+ u16 DevHandle; /*0x00 */
+ u8 MsgLength; /*0x02 */
+ u8 Function; /*0x03 */
+ u8 ResponseCode; /*0x04 */
+ u8 TaskType; /*0x05 */
+ u8 Reserved1; /*0x06 */
+ u8 MsgFlags; /*0x07 */
+ u8 VP_ID; /*0x08 */
+ u8 VF_ID; /*0x09 */
+ u16 Reserved2; /*0x0A */
+ u16 Reserved3; /*0x0C */
+ u16 IOCStatus; /*0x0E */
+ u32 IOCLogInfo; /*0x10 */
+ u32 TerminationCount; /*0x14 */
+ u32 ResponseInfo; /*0x18 */
+};
+
+struct MR_TM_REQUEST {
+ char request[128];
+};
+
+struct MR_TM_REPLY {
+ char reply[128];
+};
+
+/* SCSI Task Management Request Message */
+struct MR_TASK_MANAGE_REQUEST {
+ /* To be cast to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
+ struct MR_TM_REQUEST TmRequest;
+ union {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved1:30;
+ u32 isTMForPD:1;
+ u32 isTMForLD:1;
+#else
+ u32 isTMForLD:1;
+ u32 isTMForPD:1;
+ u32 reserved1:30;
+#endif
+ u32 reserved2;
+ } tmReqFlags;
+ struct MR_TM_REPLY TMReply;
+ };
+};
+
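/*
 * A layout sanity sketch: MR_TM_REQUEST/MR_TM_REPLY are fixed 128-byte
 * frames that get cast to the MPI2 task management structures above, so
 * a compile-time check of this form must hold:
 *
 *	BUILD_BUG_ON(sizeof(struct MPI2_SCSI_TASK_MANAGE_REQUEST) >
 *		     sizeof(struct MR_TM_REQUEST));
 *	BUILD_BUG_ON(sizeof(struct MPI2_SCSI_TASK_MANAGE_REPLY) >
 *		     sizeof(struct MR_TM_REPLY));
 */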
+/* TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/* ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
/*
* RAID SCSI IO Request Message
* Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
@@ -547,7 +643,9 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_LD_RAID {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved4:7;
+ u32 reserved4:5;
+ u32 fpBypassRegionLock:1;
+ u32 tmCapable:1;
u32 fpNonRWCapable:1;
u32 fpReadAcrossStripe:1;
u32 fpWriteAcrossStripe:1;
@@ -569,7 +667,9 @@ struct MR_LD_RAID {
u32 fpWriteAcrossStripe:1;
u32 fpReadAcrossStripe:1;
u32 fpNonRWCapable:1;
- u32 reserved4:7;
+ u32 tmCapable:1;
+ u32 fpBypassRegionLock:1;
+ u32 reserved4:5;
#endif
} capability;
__le32 reserved6;
@@ -639,7 +739,7 @@ struct IO_REQUEST_INFO {
u8 fpOkForIo;
u8 IoforUnevenSpan;
u8 start_span;
- u8 reserved;
+ u8 do_fp_rlbypass;
u64 start_row;
u8 span_arm; /* span[7:5], arm[4:0] */
u8 pd_after_lb;
@@ -694,6 +794,7 @@ struct megasas_cmd_fusion {
u32 sync_cmd_idx;
u32 index;
u8 pd_r1_lb;
+ struct completion done;
};
struct LD_LOAD_BALANCE_INFO {
@@ -807,9 +908,18 @@ struct MR_FW_RAID_MAP_EXT {
* define MR_PD_CFG_SEQ structure for system PDs
*/
struct MR_PD_CFG_SEQ {
- __le16 seqNum;
- __le16 devHandle;
- u8 reserved[4];
+ u16 seqNum;
+ u16 devHandle;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:7;
+ u8 tmCapable:1;
+#else
+ u8 tmCapable:1;
+ u8 reserved:7;
+#endif
+ } capability;
+ u8 reserved[3];
} __packed;
struct MR_PD_CFG_SEQ_NUM_SYNC {
@@ -818,6 +928,12 @@ struct MR_PD_CFG_SEQ_NUM_SYNC {
struct MR_PD_CFG_SEQ seq[1];
} __packed;
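
/*
 * A consumption sketch for the new tmCapable bits, assuming a populated
 * RAID map and JBOD sequence map (as used by megasas_get_tm_devhandle()
 * in this patch):
 *
 *	LD: issue TM only if raid->capability.tmCapable is set
 *	PD: issue TM only if pd_sync->seq[pd_index].capability.tmCapable is set
 */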
+struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+ u64 RDPQBaseAddress;
+ u32 Reserved1;
+ u32 Reserved2;
+};
+
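/*
 * A population sketch for the RDPQ array, assuming the per-MSI-x reply
 * frame addresses recorded in fusion_context below: firmware consumes
 * one entry per reply queue at IOC init instead of a single contiguous
 * reply region:
 *
 *	for (i = 0; i < instance->msix_vectors; i++)
 *		fusion->rdpq_virt[i].RDPQBaseAddress =
 *			fusion->reply_frames_desc_phys[i];
 */
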
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
dma_addr_t req_frames_desc_phys;
@@ -830,8 +946,8 @@ struct fusion_context {
struct dma_pool *sg_dma_pool;
struct dma_pool *sense_dma_pool;
- dma_addr_t reply_frames_desc_phys;
- union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
+ dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
struct dma_pool *reply_frames_desc_pool;
u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
@@ -841,6 +957,8 @@ struct fusion_context {
u32 reply_alloc_sz;
u32 io_frames_alloc_sz;
+ struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
+ dma_addr_t rdpq_phys;
u16 max_sge_in_main_msg;
u16 max_sge_in_chain;
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 555367f00228..1753e42826dd 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -29,6 +29,7 @@
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
+#include <linux/pci.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
@@ -38,7 +39,6 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
#include <asm/macio.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index ec27ad2d186f..dfad5b8c1890 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.35
+ * mpi2.h Version: 02.00.39
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -92,6 +92,14 @@
* 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
* 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT
* 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-14 02.00.36 Updated copyright information.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-16-15 02.00.37 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Scratchpad registers to
+ * MPI2_SYSTEM_INTERFACE_REGS.
+ * Added MPI2_DIAG_SBR_RELOAD.
+ * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -124,8 +132,14 @@
MPI25_VERSION_MINOR)
#define MPI2_VERSION_02_05 (0x0205)
+/*minor version for MPI v2.6 compatible products */
+#define MPI26_VERSION_MINOR (0x06)
+#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI26_VERSION_MINOR)
+#define MPI2_VERSION_02_06 (0x0206)
+
/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x23)
+#define MPI2_HEADER_VERSION_UNIT (0x27)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -179,10 +193,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
U32 HCBSize; /*0x74 */
U32 HCBAddressLow; /*0x78 */
U32 HCBAddressHigh; /*0x7C */
- U32 Reserved6[16]; /*0x80 */
+ U32 Reserved6[12]; /*0x80 */
+ U32 Scratchpad[4]; /*0xB0 */
U32 RequestDescriptorPostLow; /*0xC0 */
U32 RequestDescriptorPostHigh; /*0xC4 */
- U32 Reserved7[14]; /*0xC8 */
+ U32 AtomicRequestDescriptorPost;/*0xC8 */
+ U32 Reserved7[13]; /*0xCC */
} MPI2_SYSTEM_INTERFACE_REGS,
*PTR_MPI2_SYSTEM_INTERFACE_REGS,
Mpi2SystemInterfaceRegs_t,
@@ -224,6 +240,8 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
*/
#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+#define MPI2_DIAG_SBR_RELOAD (0x00002000)
+
#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
@@ -298,10 +316,19 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
/*
- *Offsets for the Request Queue
+ *Offsets for the Scratchpad registers
+ */
+#define MPI26_SCRATCHPAD0_OFFSET (0x000000B0)
+#define MPI26_SCRATCHPAD1_OFFSET (0x000000B4)
+#define MPI26_SCRATCHPAD2_OFFSET (0x000000B8)
+#define MPI26_SCRATCHPAD3_OFFSET (0x000000BC)
+
+/*
+ *Offsets for the Request Descriptor Post Queue
*/
#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+#define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8)
/*Hard Reset delay timings */
#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
@@ -329,7 +356,8 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
*pMpi2DefaultRequestDescriptor_t;
/*defines for the RequestFlags field */
-#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
@@ -337,7 +365,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
-#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
/*High Priority Request Descriptor */
typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
@@ -408,6 +436,33 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
Mpi2RequestDescriptorUnion_t,
*pMpi2RequestDescriptorUnion_t;
+/*Atomic Request Descriptors */
+
+/*
+ * All Atomic Request Descriptors have the same format, so the following
+ * structure is used for all Atomic Request Descriptors:
+ * Atomic Default Request Descriptor
+ * Atomic High Priority Request Descriptor
+ * Atomic SCSI IO Request Descriptor
+ * Atomic SCSI Target Request Descriptor
+ * Atomic RAID Accelerator Request Descriptor
+ * Atomic Fast Path SCSI IO Request Descriptor
+ */
+
+/*Atomic Request Descriptor */
+typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+} MPI26_ATOMIC_REQUEST_DESCRIPTOR,
+ *PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR,
+ Mpi26AtomicRequestDescriptor_t,
+ *pMpi26AtomicRequestDescriptor_t;
+
+/*for the RequestFlags field, use the same
+ *defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR
+ */
+
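/*
 * A posting sketch, assuming an ioremap'd register base in a
 * hypothetical 'mmio' pointer: the descriptor fits in 32 bits, so one
 * register write posts it, with no low/high split and no lock around a
 * two-register sequence:
 *
 *	Mpi26AtomicRequestDescriptor_t d = { 0 };
 *	d.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 *	d.MSIxIndex = msix;
 *	d.SMID = cpu_to_le16(smid);
 *	writel(*(u32 *)&d, mmio + MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET);
 */
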
/*Reply Descriptors */
/*Default Reply Descriptor */
@@ -548,6 +603,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A)
#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
+#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B)
#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C)
#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D)
#define MPI2_FUNCTION_DIAG_RELEASE (0x1E)
@@ -587,6 +643,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A)
/****************************************************************************
* Config IOCStatus values
@@ -1045,7 +1102,7 @@ typedef union _MPI2_IEEE_SGE_CHAIN_UNION {
Mpi2IeeeSgeChainUnion_t,
*pMpi2IeeeSgeChainUnion_t;
-/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */
+/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */
typedef struct _MPI25_IEEE_SGE_CHAIN64 {
U64 Address;
U32 Length;
@@ -1098,6 +1155,11 @@ typedef union _MPI25_SGE_IO_UNION {
#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+/*Next Segment Format */
+
+#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
+#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
+
/*Data Location Address Space */
#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -1108,6 +1170,7 @@ typedef union _MPI25_SGE_IO_UNION {
#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
(MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR)
+#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02)
/****************************************************************************
* IEEE SGE operation Macros
@@ -1166,6 +1229,7 @@ typedef union _MPI2_SGE_IO_UNION {
#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
/*values for SGL Type subfield */
#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 581fdb375db5..9cf09bf7c4a8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.29
+ * mpi2_cnfg.h Version: 02.00.33
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -178,7 +178,14 @@
* 01-08-14 02.00.28 Added more defines for the BiosOptions field of
* MPI2_CONFIG_PAGE_BIOS_1.
* 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
- * more defines for the BiosOptions field..
+ * more defines for the BiosOptions field.
+ * 11-18-14 02.00.30 Updated copyright information.
+ * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG.
+ * Added AdapterOrderAux fields to BIOS Page 3.
+ * 03-16-15 02.00.31 Updated for MPI v2.6.
+ * Added new SAS Phy Event codes
+ * 05-25-15 02.00.33 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
* --------------------------------------------------------------------------
*/
@@ -355,7 +362,6 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
-
/****************************************************************************
* Configuration messages
****************************************************************************/
@@ -457,8 +463,17 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094)
#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095)
-
-
+/* MPI v2.6 SAS Products */
+#define MPI26_MFGPAGE_DEVID_SAS3216 (0x00C9)
+#define MPI26_MFGPAGE_DEVID_SAS3224 (0x00C4)
+#define MPI26_MFGPAGE_DEVID_SAS3316_1 (0x00C5)
+#define MPI26_MFGPAGE_DEVID_SAS3316_2 (0x00C6)
+#define MPI26_MFGPAGE_DEVID_SAS3316_3 (0x00C7)
+#define MPI26_MFGPAGE_DEVID_SAS3316_4 (0x00C8)
+#define MPI26_MFGPAGE_DEVID_SAS3324_1 (0x00C0)
+#define MPI26_MFGPAGE_DEVID_SAS3324_2 (0x00C1)
+#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2)
+#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3)
/*Manufacturing Page 0 */
@@ -941,8 +956,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
U8
BoardTemperatureUnits; /*0x16 */
U8 Reserved3; /*0x17 */
- U32 Reserved4; /* 0x18 */
- U32 Reserved5; /* 0x1C */
+ U32 BoardPowerRequirement; /*0x18 */
+ U32 PCISlotPowerAllocation; /*0x1C */
U32 Reserved6; /* 0x20 */
U32 Reserved7; /* 0x24 */
} MPI2_CONFIG_PAGE_IO_UNIT_7,
@@ -1151,6 +1166,62 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+/* IO Unit Page 11 (for MPI v2.6 and later) */
+
+typedef struct _MPI26_IOUNIT11_SPINUP_GROUP {
+ U8 MaxTargetSpinup; /* 0x00 */
+ U8 SpinupDelay; /* 0x01 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+} MPI26_IOUNIT11_SPINUP_GROUP,
+ *PTR_MPI26_IOUNIT11_SPINUP_GROUP,
+ Mpi26IOUnit11SpinupGroup_t,
+ *pMpi26IOUnit11SpinupGroup_t;
+
+/* defines for IO Unit Page 11 SpinupFlags */
+#define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * four and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI26_IOUNITPAGE11_PHY_MAX
+#define MPI26_IOUNITPAGE11_PHY_MAX (4)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /*0x08 */
+ U32 Reserved2; /*0x18 */
+ U32 Reserved3; /*0x1C */
+ U32 Reserved4; /*0x20 */
+ U8 BootDeviceWaitTime; /*0x24 */
+ U8 Reserved5; /*0x25 */
+ U16 Reserved6; /*0x26 */
+ U8 NumPhys; /*0x28 */
+ U8 PEInitialSpinupDelay; /*0x29 */
+ U8 PEReplyDelay; /*0x2A */
+ U8 Flags; /*0x2B */
+ U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/*0x2C */
+} MPI26_CONFIG_PAGE_IO_UNIT_11,
+ *PTR_MPI26_CONFIG_PAGE_IO_UNIT_11,
+ Mpi26IOUnitPage11_t,
+ *pMpi26IOUnitPage11_t;
+
+#define MPI26_IOUNITPAGE11_PAGEVERSION (0x00)
+
+/* defines for Flags field */
+#define MPI26_IOUNITPAGE11_FLAGS_AUTO_PORTENABLE (0x01)
+
+/* defines for PHY field */
+#define MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+
+
+
/****************************************************************************
* IOC Config Pages
@@ -1343,6 +1414,10 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
#define MPI2_BIOSPAGE1_PAGEVERSION (0x07)
/*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000)
+#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
@@ -1492,6 +1567,8 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
/*BIOS Page 3 */
+#define MPI2_BIOSPAGE3_NUM_ADAPTER (4)
+
typedef struct _MPI2_ADAPTER_INFO {
U8 PciBusNumber; /*0x00 */
U8 PciDeviceAndFunctionNumber; /*0x01 */
@@ -1502,17 +1579,26 @@ typedef struct _MPI2_ADAPTER_INFO {
#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+typedef struct _MPI2_ADAPTER_ORDER_AUX {
+ U64 WWID; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_ADAPTER_ORDER_AUX, *PTR_MPI2_ADAPTER_ORDER_AUX,
+ Mpi2AdapterOrderAux_t, *pMpi2AdapterOrderAux_t;
+
+
typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
U32 GlobalFlags; /*0x04 */
U32 BiosVersion; /*0x08 */
- MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */
+ MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER];
U32 Reserved1; /*0x1C */
+ MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER];
} MPI2_CONFIG_PAGE_BIOS_3,
*PTR_MPI2_CONFIG_PAGE_BIOS_3,
Mpi2BiosPage3_t, *pMpi2BiosPage3_t;
-#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x01)
/*values for BIOS Page 3 GlobalFlags */
#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
@@ -2006,6 +2092,8 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
/*values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
@@ -2108,6 +2196,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
@@ -2125,6 +2214,8 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
/*values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
@@ -2144,7 +2235,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
*SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
-/*SAS IO Unit Page 4 */
+/*SAS IO Unit Page 4 (for MPI v2.5 and earlier) */
typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP {
U8 MaxTargetSpinup; /*0x00 */
@@ -2715,6 +2806,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_PERSIST_CAPABLE (0x0004)
#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
@@ -2922,6 +3014,19 @@ typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+/* The following codes are product specific and exist in MPI v2.6 and later */
+#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3)
+#define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4)
+#define MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5)
+#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7)
+#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9)
+#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB)
+#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC)
+
+
/*values for the CounterType field */
#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 068c98efd742..c38f624b859d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_init.h
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.15
+ * mpi2_init.h Version: 02.00.17
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -46,6 +46,11 @@
* 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
* 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
* replacing the Reserved4 field.
+ * 11-18-14 02.00.16 Updated copyright information.
+ * 03-16-15 02.00.17 Updated for MPI v2.6.
+ * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH.
+ * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and
+ * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF.
* --------------------------------------------------------------------------
*/
@@ -128,6 +133,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST {
#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08)
/*SCSI IO SGLFlags bits */
@@ -228,7 +234,7 @@ typedef union _MPI25_SCSI_IO_CDB_UNION {
} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION,
Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t;
-/*MPI v2.5 SCSI IO Request Message */
+/*MPI v2.5/2.6 SCSI IO Request Message */
typedef struct _MPI25_SCSI_IO_REQUEST {
U16 DevHandle; /*0x00 */
U8 ChainOffset; /*0x02 */
@@ -302,12 +308,14 @@ typedef struct _MPI25_SCSI_IO_REQUEST {
#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
/*defines for the IoFlags field */
-#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
-#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
-#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000)
#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400)
#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
/*MPI v2.5 defines for the EEDPFlags bits */
@@ -512,6 +520,7 @@ typedef struct _MPI2_SEP_REQUEST {
#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
/*SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000)
#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
@@ -547,6 +556,7 @@ typedef struct _MPI2_SEP_REPLY {
Mpi2SepReply_t, *pMpi2SepReply_t;
/*SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000)
#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index d7598cc4bb8e..cf510ed91924 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.24
+ * mpi2_ioc.h Version: 02.00.26
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -133,6 +133,10 @@
* Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
* Added Encrypted Hash Extended Image.
* 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * 11-18-14 02.00.25 Updated copyright information.
+ * 03-16-15 02.00.26 Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
+ * MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
+ * Added MPI26_CTRL_OP_SHUTDOWN.
* --------------------------------------------------------------------------
*/
@@ -165,7 +169,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
U16 HeaderVersion; /*0x0E */
U32 Reserved5; /*0x10 */
U16 Reserved6; /*0x14 */
- U8 Reserved7; /*0x16 */
+ U8 HostPageSize; /*0x16 */
U8 HostMSIxVectors; /*0x17 */
U16 Reserved8; /*0x18 */
U16 SystemRequestFrameSize; /*0x1A */
@@ -289,7 +293,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
U16 MaxDevHandle; /*0x38 */
U16 MaxPersistentEntries; /*0x3A */
U16 MinDevHandle; /*0x3C */
- U16 Reserved4; /*0x3E */
+ U8 CurrentHostPageSize; /* 0x3E */
+ U8 Reserved4; /* 0x3F */
} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
@@ -326,6 +331,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
@@ -343,8 +349,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
/*ProtocolFlags */
-#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
/****************************************************************************
* PortFacts message
@@ -1247,6 +1253,7 @@ typedef struct _MPI2_FW_UPLOAD_REQUEST {
#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D)
/*MPI v2.0 FWUpload TransactionContext Element */
typedef struct _MPI2_FW_UPLOAD_TCSGE {
@@ -1328,7 +1335,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
U32 Reserved54; /*0x54 */
U32 Reserved58; /*0x58 */
U32 Reserved5C; /*0x5C */
- U32 Reserved60; /*0x60 */
+ U32 BootFlags; /*0x60 */
U32 FirmwareVersionNameWhat; /*0x64 */
U8 FirmwareVersionName[32]; /*0x68 */
U32 VendorNameWhat; /*0x88 */
@@ -1354,18 +1361,22 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+#define MPI26_FW_HEADER_SIGNATURE (0xEB000000)
/*Signature0 field */
#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+#define MPI26_FW_HEADER_SIGNATURE0 (0x5AEAA55A)
/*Signature1 field */
#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)
/*Signature2 field */
#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
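
/*
 * A detection sketch, assuming 'hdr' points at a fetched
 * MPI2_FW_IMAGE_HEADER: MPI v2.6 images carry 0xEB in the signature
 * byte where MPI v2.0/2.5 images carry 0xEA:
 *
 *	if ((le32_to_cpu(hdr->Signature) & MPI2_FW_HEADER_SIGNATURE_MASK) ==
 *	    MPI26_FW_HEADER_SIGNATURE)
 *		... image targets an MPI v2.6 controller ...
 */
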
/*defines for using the ProductID field */
#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
@@ -1381,6 +1392,8 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
+#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)
/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
@@ -1388,6 +1401,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
@@ -1493,7 +1507,9 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
#define MPI2_FLASH_REGION_MEGARAID (0x09)
-#define MPI2_FLASH_REGION_INIT (0x0A)
+#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
+#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
+#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
/*ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
@@ -1619,7 +1635,6 @@ typedef struct _MPI25_ENCRYPTED_HASH_DATA {
Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
-
/****************************************************************************
* PowerManagementControl message
****************************************************************************/
@@ -1726,4 +1741,90 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t;
+/****************************************************************************
+* IO Unit Control messages (MPI v2.6 and later only.)
+****************************************************************************/
+
+/* IO Unit Control Request Message */
+typedef struct _MPI26_IOUNIT_CONTROL_REQUEST {
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U8 PhyNum; /* 0x0E */
+ U8 PrimFlags; /* 0x0F */
+ U32 Primitive; /* 0x10 */
+ U8 LookupMethod; /* 0x14 */
+ U8 Reserved5; /* 0x15 */
+ U16 SlotNumber; /* 0x16 */
+ U64 LookupAddress; /* 0x18 */
+ U32 IOCParameterValue; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
+ U32 Reserved8; /* 0x28 */
+} MPI26_IOUNIT_CONTROL_REQUEST,
+ *PTR_MPI26_IOUNIT_CONTROL_REQUEST,
+ Mpi26IoUnitControlRequest_t,
+ *pMpi26IoUnitControlRequest_t;
+
+/* values for the Operation field */
+#define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06)
+#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07)
+#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A)
+#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D)
+#define MPI26_CTRL_OP_LOOKUP_MAPPING (0x0E)
+#define MPI26_CTRL_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI26_CTRL_OP_ENABLE_FP_DEVICE (0x10)
+#define MPI26_CTRL_OP_DISABLE_FP_DEVICE (0x11)
+#define MPI26_CTRL_OP_ENABLE_FP_ALL (0x12)
+#define MPI26_CTRL_OP_DISABLE_FP_ALL (0x13)
+#define MPI26_CTRL_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI26_CTRL_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI26_CTRL_OP_SHUTDOWN (0x16)
+#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17)
+#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18)
+#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19)
+#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/* values for the PrimFlags field */
+#define MPI26_CTRL_PRIMFLAGS_SINGLE (0x08)
+#define MPI26_CTRL_PRIMFLAGS_TRIPLE (0x02)
+#define MPI26_CTRL_PRIMFLAGS_REDUNDANT (0x01)
+
+/* values for the LookupMethod field */
+#define MPI26_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
+#define MPI26_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
+#define MPI26_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+
+
+/* IO Unit Control Reply Message */
+typedef struct _MPI26_IOUNIT_CONTROL_REPLY {
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI26_IOUNIT_CONTROL_REPLY,
+ *PTR_MPI26_IOUNIT_CONTROL_REPLY,
+ Mpi26IoUnitControlReply_t,
+ *pMpi26IoUnitControlReply_t;
+
+
#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index 13d93ca029d5..1c0eeeeb5eaf 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2014 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_raid.h
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.10
+ * mpi2_raid.h Version: 02.00.11
*
* Version History
* ---------------
@@ -31,6 +31,7 @@
* 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
* Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
* 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
+ * 11-18-14 02.00.11 Updated copyright information.
* --------------------------------------------------------------------------
*/
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index 156e30543a2f..c10c2c02a945 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_sas.h
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2_sas.h Version: 02.00.08
+ * mpi2_sas.h Version: 02.00.10
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -32,6 +32,9 @@
* Passthrough Request message.
* 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
* for anything newer than MPI v2.0.
+ * 11-18-14 02.00.09 Updated copyright information.
+ * 03-16-15 02.00.10 Updated for MPI v2.6.
+ * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA.
* --------------------------------------------------------------------------
*/
@@ -183,6 +186,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST {
/*values for PassthroughFlags field */
#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040)
#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
@@ -216,6 +220,8 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REPLY {
/****************************************************************************
* SAS IO Unit Control messages
+* (MPI v2.5 and earlier only.
+* Replaced by IO Unit Control messages in MPI v2.6 and later.)
****************************************************************************/
/*SAS IO Unit Control Request Message */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 1629e5bce7e1..5f9289a1166f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2014 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_tool.h
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.12
+ * mpi2_tool.h Version: 02.00.13
*
* Version History
* ---------------
@@ -34,6 +34,7 @@
* it uses MPI Chain SGE as well as MPI Simple SGE.
* 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
* 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
+ * 11-18-14 02.00.13 Updated copyright information.
* --------------------------------------------------------------------------
*/
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
index 99ab093602e8..92a81abc2c31 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2014 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_type.h
* Title: MPI basic type definitions
* Creation Date: August 16, 2006
*
- * mpi2_type.h Version: 02.00.00
+ * mpi2_type.h Version: 02.00.01
*
* Version History
* ---------------
@@ -14,6 +14,7 @@
* Date Version Description
* -------- -------- ------------------------------------------------------
* 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 11-18-14 02.00.01 Updated copyright information.
* --------------------------------------------------------------------------
*/
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 83658acddd58..e4db5fb3239a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -83,6 +83,10 @@ static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+static int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
+
static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
@@ -395,6 +399,9 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
desc = "insufficient resources";
break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
+ desc = "insufficient power";
+ break;
case MPI2_IOCSTATUS_INVALID_FIELD:
desc = "invalid field";
break;
@@ -772,7 +779,7 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
- return 1;
+ return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
return 1;
@@ -803,6 +810,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
Mpi2EventNotificationReply_t *mpi_reply;
Mpi2EventAckRequest_t *ack_request;
u16 smid;
+ struct _event_ack_list *delayed_event_ack;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (!mpi_reply)
@@ -816,8 +824,18 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
goto out;
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
+ GFP_ATOMIC);
+ if (!delayed_event_ack)
+ goto out;
+ INIT_LIST_HEAD(&delayed_event_ack->list);
+ delayed_event_ack->Event = mpi_reply->Event;
+ delayed_event_ack->EventContext = mpi_reply->EventContext;
+ list_add_tail(&delayed_event_ack->list,
+ &ioc->delayed_event_ack_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED: EVENT ACK: event (0x%04x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->Event)));
goto out;
}
@@ -1348,6 +1366,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
+
_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}
@@ -1797,9 +1816,10 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
- irq_set_affinity_hint(reply_q->vector, NULL);
- free_cpumask_var(reply_q->affinity_hint);
- synchronize_irq(reply_q->vector);
+ if (smp_affinity_enable) {
+ irq_set_affinity_hint(reply_q->vector, NULL);
+ free_cpumask_var(reply_q->affinity_hint);
+ }
free_irq(reply_q->vector, reply_q);
kfree(reply_q);
}
@@ -1829,9 +1849,12 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
reply_q->msix_index = index;
reply_q->vector = vector;
- if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
- return -ENOMEM;
- cpumask_clear(reply_q->affinity_hint);
+ if (smp_affinity_enable) {
+ if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
+ kfree(reply_q);
+ return -ENOMEM;
+ }
+ }
atomic_set(&reply_q->busy, 0);
if (ioc->msix_enable)
@@ -1845,6 +1868,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
if (r) {
pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
reply_q->name, vector);
+ free_cpumask_var(reply_q->affinity_hint);
kfree(reply_q);
return -EBUSY;
}
@@ -1894,16 +1918,17 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
for (i = 0 ; i < group ; i++) {
ioc->cpu_msix_table[cpu] = index;
- cpumask_or(reply_q->affinity_hint,
+ if (smp_affinity_enable)
+ cpumask_or(reply_q->affinity_hint,
reply_q->affinity_hint, get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
-
- if (irq_set_affinity_hint(reply_q->vector,
+ if (smp_affinity_enable)
+ if (irq_set_affinity_hint(reply_q->vector,
reply_q->affinity_hint))
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "error setting affinity hint for irq vector %d\n",
- ioc->name, reply_q->vector));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "Err setting affinity hint to irq vector %d\n",
+ ioc->name, reply_q->vector));
index++;
}
}
@@ -1961,6 +1986,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
} else if (max_msix_vectors == 0)
goto try_ioapic;
+ if (ioc->msix_vector_count < ioc->cpu_count)
+ smp_affinity_enable = 0;
+
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
GFP_KERNEL);
if (!entries) {
@@ -2231,6 +2259,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
/**
* mpt3sas_base_get_smid - obtain a free smid from internal queue
* @ioc: per adapter object
@@ -2291,6 +2325,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
+ request->msix_io = _base_get_msix_index(ioc);
list_del(&request->tracker_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return smid;
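
This hunk tags every SCSI IO, at smid allocation time, with the MSI-X index
that the submitting CPU maps to; mpt3sas_scsih_issue_tm() later reuses that
index so an ABORT_TASK is posted to the same reply queue that services the IO
being aborted. The lookup itself is a one-line table indexing, sketched here
with a hypothetical 4-CPU, 2-vector mapping:

#include <stdio.h>

int main(void)
{
	/* cpu_msix_table[cpu] -> reply-queue (MSI-X) index; values are
	 * hypothetical, the driver builds this in _base_assign_reply_queues()
	 */
	unsigned char cpu_msix_table[4] = { 0, 0, 1, 1 };
	int cpu = 2;			/* stand-in for raw_smp_processor_id() */

	printf("cpu %d -> msix %d\n", cpu, cpu_msix_table[cpu]);
	return 0;
}
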
@@ -2413,12 +2448,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
}
#endif
-static inline u8
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
-{
- return ioc->cpu_msix_table[raw_smp_processor_id()];
-}
-
/**
* mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
@@ -2472,18 +2501,19 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
- *
+ * @msix_task: MSI-X index of the I/O in case of task abort, else 0.
* Return nothing.
*/
void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
descriptor.HighPriority.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.MSIxIndex = msix_task;
descriptor.HighPriority.SMID = cpu_to_le16(smid);
descriptor.HighPriority.LMID = 0;
descriptor.HighPriority.Reserved1 = 0;
@@ -3185,20 +3215,35 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
}
ioc->shost->sg_tablesize = sg_tablesize;
- ioc->hi_priority_depth = facts->HighPriorityCredit;
- ioc->internal_depth = ioc->hi_priority_depth + (5);
+ ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
+ (facts->RequestCredit / 4));
+ if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
+ if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
+ INTERNAL_SCSIIO_CMDS_COUNT)) {
+			pr_err(MPT3SAS_FMT
+			    "IOC doesn't have enough Request Credits, it has only %d credits\n",
+ ioc->name, facts->RequestCredit);
+ return -ENOMEM;
+ }
+ ioc->internal_depth = 10;
+ }
+
+ ioc->hi_priority_depth = ioc->internal_depth - (5);
/* command line tunables for max controller queue depth */
if (max_queue_depth != -1 && max_queue_depth != 0) {
max_request_credit = min_t(u16, max_queue_depth +
- ioc->hi_priority_depth + ioc->internal_depth,
- facts->RequestCredit);
+ ioc->internal_depth, facts->RequestCredit);
if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
max_request_credit = MAX_HBA_QUEUE_DEPTH;
} else
max_request_credit = min_t(u16, facts->RequestCredit,
MAX_HBA_QUEUE_DEPTH);
- ioc->hba_queue_depth = max_request_credit;
+	/* Firmware maintains an additional facts->HighPriorityCredit number
+	 * of credits for high-priority Request messages, so the hba queue
+	 * depth is the sum of max_request_credit and the high-priority
+	 * queue depth (a worked example follows this hunk).
+	 */
+ ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
/* request frame size */
ioc->request_sz = facts->IOCRequestFrameSize * 4;
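
The worked example promised in the comment above, as a standalone sketch:
assume hypothetical IOC facts of HighPriorityCredit = 80 and
RequestCredit = 1000, no max_queue_depth module override, and plain C in
place of the kernel's min_t():

#include <stdio.h>

int main(void)
{
	int high_priority_credit = 80;	/* facts->HighPriorityCredit */
	int request_credit = 1000;	/* facts->RequestCredit */
	int internal_depth, hi_priority_depth, hba_queue_depth;

	/* internal_depth = min(HighPriorityCredit + 5, RequestCredit / 4) */
	internal_depth = high_priority_credit + 5;
	if (internal_depth > request_credit / 4)
		internal_depth = request_credit / 4;
	/* 85 here, comfortably above INTERNAL_CMDS_COUNT */

	hi_priority_depth = internal_depth - 5;			/* 80 */

	/* firmware keeps HighPriorityCredit extra credits, so the HBA
	 * queue depth is the credit total plus the hi-priority share */
	hba_queue_depth = request_credit + hi_priority_depth;	/* 1080 */

	printf("%d %d %d\n", internal_depth, hi_priority_depth,
	       hba_queue_depth);
	return 0;
}

With these numbers the IOC exposes 1000 credits to ordinary IO while the
driver still reserves 85 internal slots, 80 of them for the high-priority
queue.
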
@@ -3206,6 +3251,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/* reply frame size */
ioc->reply_sz = facts->ReplyFrameSize * 4;
+ /* chain segment size */
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ if (facts->IOCMaxChainSegmentSize)
+ ioc->chain_segment_sz =
+ facts->IOCMaxChainSegmentSize *
+ MAX_CHAIN_ELEMT_SZ;
+ else
+ /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
+ ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
+ MAX_CHAIN_ELEMT_SZ;
+ } else
+ ioc->chain_segment_sz = ioc->request_sz;
+
/* calculate the max scatter element size */
sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
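
The chain-segment sizing just above reduces to a small helper. A sketch
assuming the constants added to mpt3sas_base.h later in this patch
(MAX_CHAIN_ELEMT_SZ = 16, DEFAULT_NUM_FWCHAIN_ELEMTS = 8); the function name
is illustrative, not the driver's:

/* chain buffer size in bytes for a non-MPI2.0 IOC */
static unsigned int chain_segment_bytes(unsigned int ioc_max_chain_seg_size)
{
	if (ioc_max_chain_seg_size)
		return ioc_max_chain_seg_size * 16; /* MAX_CHAIN_ELEMT_SZ */
	return 8 * 16;	/* DEFAULT_NUM_FWCHAIN_ELEMTS * 16 = 128 bytes */
}

So a firmware-reported IOCMaxChainSegmentSize of 32 yields a 512-byte chain
buffer, a report of zero falls back to 128 bytes, and MPI 2.0 parts keep
using request_sz; max_sges_in_chain_message is then derived from this size.
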
@@ -3217,7 +3275,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->max_sges_in_main_message = max_sge_elements/sge_size;
/* now do the same for a chain buffer */
- max_sge_elements = ioc->request_sz - sge_size;
+ max_sge_elements = ioc->chain_segment_sz - sge_size;
ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
/*
@@ -3245,7 +3303,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->reply_post_queue_depth += 16 -
(ioc->reply_post_queue_depth % 16);
-
if (ioc->reply_post_queue_depth >
facts->MaxReplyDescriptorPostQueueDepth) {
ioc->reply_post_queue_depth =
@@ -3327,7 +3384,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/* set the scsi host can_queue depth
* with some internal commands that could be outstanding
*/
- ioc->shost->can_queue = ioc->scsiio_depth;
+ ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"scsi host: can_queue depth (%d)\n",
ioc->name, ioc->shost->can_queue));
@@ -3354,8 +3411,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
goto out;
- retry_sz += 64;
- ioc->hba_queue_depth = max_request_credit - retry_sz;
+ retry_sz = 64;
+ ioc->hba_queue_depth -= retry_sz;
+ _base_release_memory_pools(ioc);
goto retry_allocation;
}
@@ -3410,7 +3468,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
goto out;
}
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
- ioc->request_sz, 16, 0);
+ ioc->chain_segment_sz, 16, 0);
if (!ioc->chain_dma_pool) {
pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
ioc->name);
@@ -3424,13 +3482,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->chain_depth = i;
goto chain_done;
}
- total_sz += ioc->request_sz;
+ total_sz += ioc->chain_segment_sz;
}
chain_done:
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->chain_depth, ioc->request_sz,
- ((ioc->chain_depth * ioc->request_sz))/1024));
+ ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
+ ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
/* initialize hi-priority queue smid's */
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
@@ -4291,6 +4349,10 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ facts->IOCMaxChainSegmentSize =
+ le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
+ }
facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
ioc->shost->max_id = -1;
@@ -4973,6 +5035,8 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
u32 reply_address;
u16 smid;
struct _tr_list *delayed_tr, *delayed_tr_next;
+ struct _sc_list *delayed_sc, *delayed_sc_next;
+ struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
u8 hide_flag;
struct adapter_reply_queue *reply_q;
long reply_post_free;
@@ -4995,6 +5059,18 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
kfree(delayed_tr);
}
+ list_for_each_entry_safe(delayed_sc, delayed_sc_next,
+ &ioc->delayed_sc_list, list) {
+ list_del(&delayed_sc->list);
+ kfree(delayed_sc);
+ }
+
+ list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
+ &ioc->delayed_event_ack_list, list) {
+ list_del(&delayed_event_ack->list);
+ kfree(delayed_event_ack);
+ }
+
/* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
INIT_LIST_HEAD(&ioc->free_list);
@@ -5226,6 +5302,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_zero_len_sge = &_base_build_zero_len_sge;
break;
case MPI25_VERSION:
+ case MPI26_VERSION:
/*
* In SAS3.0,
* SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 5ad271efbd45..32580b514b18 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "09.102.00.00"
-#define MPT3SAS_MAJOR_VERSION 9
-#define MPT3SAS_MINOR_VERSION 102
+#define MPT3SAS_DRIVER_VERSION "12.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 12
+#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -122,11 +122,16 @@
#define NO_SLEEP 0
#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
+/* reserved for issuing internally framed scsi io cmds */
+#define INTERNAL_SCSIIO_CMDS_COUNT 3
#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit */
#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF
+#define MAX_CHAIN_ELEMT_SZ 16
+#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
+
/*
* reset phases
*/
@@ -398,6 +403,7 @@ struct MPT3SAS_DEVICE {
u8 configured_lun;
u8 block;
u8 tlr_snoop_check;
+ u8 ignore_delay_remove;
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
@@ -643,6 +649,7 @@ struct chain_tracker {
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
* @tracker_list: list of free request (ioc->free_list)
+ * @msix_io: MSI-X index the I/O was submitted on
*/
struct scsiio_tracker {
u16 smid;
@@ -651,6 +658,7 @@ struct scsiio_tracker {
u8 direct_io;
struct list_head chain_list;
struct list_head tracker_list;
+ u16 msix_io;
};
/**
@@ -676,6 +684,25 @@ struct _tr_list {
u16 state;
};
+/**
+ * struct _sc_list - delayed SAS_IO_UNIT_CONTROL message list
+ * @handle: device handle
+ */
+struct _sc_list {
+ struct list_head list;
+ u16 handle;
+};
+
+/**
+ * struct _event_ack_list - delayed event acknowledgment list
+ * @Event: Event ID
+ * @EventContext: used to track the event uniquely
+ */
+struct _event_ack_list {
+ struct list_head list;
+ u16 Event;
+ u32 EventContext;
+};
/**
* struct adapter_reply_queue - the reply queue struct
@@ -737,7 +764,7 @@ struct mpt3sas_facts {
u32 IOCCapabilities;
union mpi3_version_union FWVersion;
u16 IOCRequestFrameSize;
- u16 Reserved3;
+ u16 IOCMaxChainSegmentSize;
u16 MaxInitiators;
u16 MaxTargets;
u16 MaxSasExpanders;
@@ -884,6 +911,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @max_sges_in_chain_message: number sg elements per chain
* @chains_needed_per_io: max chains per io
* @chain_depth: total chains allocated
+ * @chain_segment_sz: size (in bytes) of a single chain buffer, which
+ *			bounds how many SGEs one chain segment can hold
* @hi_priority_smid:
* @hi_priority:
* @hi_priority_dma:
@@ -921,6 +950,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @replyPostRegisterIndex: index of next position in Reply Desc Post Queue
* @delayed_tr_list: target reset link list
* @delayed_tr_volume_list: volume target reset link list
+ * @delayed_sc_list: delayed SAS_IO_UNIT_CONTROL message list
+ * @delayed_event_ack_list: delayed event acknowledgment list
* @temp_sensors_count: flag to carry the number of temperature sensors
* @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and
* pci resource handling. PCI resource freeing will lead to free
@@ -1089,6 +1120,7 @@ struct MPT3SAS_ADAPTER {
u16 max_sges_in_chain_message;
u16 chains_needed_per_io;
u32 chain_depth;
+ u16 chain_segment_sz;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -1142,6 +1174,8 @@ struct MPT3SAS_ADAPTER {
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
+ struct list_head delayed_sc_list;
+ struct list_head delayed_event_ack_list;
u8 temp_sensors_count;
struct mutex pci_access_mutex;
@@ -1213,7 +1247,8 @@ void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 msix_task);
void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
@@ -1259,6 +1294,8 @@ void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address);
+u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
struct MPT3SAS_ADAPTER *ioc, u16 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d8366b056b70..7d00f09666b6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -401,7 +401,8 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
Mpi2EventNotificationReply_t *mpi_reply;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ if (mpi_reply)
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
return 1;
}
@@ -410,7 +411,7 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
* @ioc: per adapter object
* @iocpp: The ioc pointer is returned in this.
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
- * MPI25_VERSION for mpt3ctl ioctl device.
+ * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*
* Return (-1) means error, else ioc_number.
*/
@@ -419,6 +420,7 @@ _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
int mpi_version)
{
struct MPT3SAS_ADAPTER *ioc;
+ int version = 0;
/* global ioc lock to protect controller on list operations */
spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
@@ -427,8 +429,21 @@ _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
/* Check whether this ioctl command is from right
* ioctl device or not, if not continue the search.
*/
- if (ioc->hba_mpi_version_belonged != mpi_version)
- continue;
+ version = ioc->hba_mpi_version_belonged;
+ /* MPI25_VERSION and MPI26_VERSION uses same ioctl
+ * device.
+ */
+ if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
+ if ((version == MPI25_VERSION) ||
+ (version == MPI26_VERSION))
+ goto out;
+ else
+ continue;
+ } else {
+ if (version != mpi_version)
+ continue;
+ }
+out:
spin_unlock(&gioc_lock);
*iocpp = ioc;
return ioc_number;
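
The combined-sentinel trick above lets a single ioctl node serve two MPI
generations. A compact sketch of the matching rule (the enum values are
hypothetical placeholders; only the shape of the test matters):

#include <stdbool.h>

enum { MPI25_VERSION = 0x0205, MPI26_VERSION = 0x0206 };  /* placeholders */

static bool ioc_matches(int ioc_version, int requested)
{
	/* mpt3ctl passes MPI25_VERSION | MPI26_VERSION as one sentinel */
	if (requested == (MPI25_VERSION | MPI26_VERSION))
		return ioc_version == MPI25_VERSION ||
		       ioc_version == MPI26_VERSION;
	return ioc_version == requested;	/* mpt2ctl: exact match */
}

This only works because neither real version constant equals the OR of the
two, so the sentinel can never collide with a genuine single-version request
(true for the placeholder values here, where 0x0205 | 0x0206 = 0x0207).
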
@@ -817,7 +832,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -1053,6 +1068,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
break;
case MPI25_VERSION:
+ case MPI26_VERSION:
karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
break;
@@ -2203,7 +2219,7 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
* @arg - user space data buffer
* @compat - handles 32 bit applications in 64bit os
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
- * MPI25_VERSION for mpt3ctl ioctl device.
+ * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*/
static long
_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
@@ -2341,10 +2357,12 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
- /* pass MPI25_VERSION value, to indicate that this ioctl cmd
+ /* pass MPI25_VERSION | MPI26_VERSION value,
+ * to indicate that this ioctl cmd
* came from mpt3ctl ioctl device.
*/
- ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI25_VERSION);
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
+ MPI25_VERSION | MPI26_VERSION);
return ret;
}
@@ -2379,7 +2397,8 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
{
long ret;
- ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI25_VERSION);
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
+ MPI25_VERSION | MPI26_VERSION);
return ret;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 9ab77b06434d..e0e4920d0fa6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1589,10 +1589,16 @@ scsih_get_resync(struct device *dev)
percent_complete = 0;
out:
- if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
- if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+ break;
+ }
}
/**
@@ -1650,10 +1656,15 @@ scsih_get_state(struct device *dev)
break;
}
out:
- if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
raid_set_state(mpt2sas_raid_template, dev, state);
- if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
raid_set_state(mpt3sas_raid_template, dev, state);
+ break;
+ }
}
/**
@@ -1682,12 +1693,17 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
break;
}
- if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
raid_set_level(mpt2sas_raid_template,
- &sdev->sdev_gendev, level);
- if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+ &sdev->sdev_gendev, level);
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
raid_set_level(mpt3sas_raid_template,
- &sdev->sdev_gendev, level);
+ &sdev->sdev_gendev, level);
+ break;
+ }
}
@@ -1937,7 +1953,15 @@ scsih_slave_configure(struct scsi_device *sdev)
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
ssp_target = 1;
- ds = "SSP";
+ if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SEP) {
+ sdev_printk(KERN_WARNING, sdev,
+ "set ignore_delay_remove for handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->ignore_delay_remove = 1;
+ ds = "SES";
+ } else
+ ds = "SSP";
} else {
qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
@@ -2193,6 +2217,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
unsigned long timeleft;
struct scsiio_tracker *scsi_lookup = NULL;
int rc;
+ u16 msix_task = 0;
if (m_type == TM_MUTEX_ON)
mutex_lock(&ioc->tm_cmds.mutex);
@@ -2256,7 +2281,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
+ (scsi_lookup->msix_io < ioc->reply_queue_count))
+ msix_task = scsi_lookup->msix_io;
+ else
+ msix_task = 0;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2383,7 +2413,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
(unsigned long long)
sas_device->enclosure_logical_id,
sas_device->slot);
- if (sas_device->connector_name)
+ if (sas_device->connector_name[0] != '\0')
starget_printk(KERN_INFO, starget,
"enclosure level(0x%04x),connector name(%s)\n",
sas_device->enclosure_level,
@@ -2927,6 +2957,12 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
continue;
if (sas_device_priv_data->block)
continue;
+ if (sas_device_priv_data->ignore_delay_remove) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip device_block for SES handle(0x%04x)\n",
+ __func__, sas_device_priv_data->sas_target->handle);
+ continue;
+ }
_scsih_internal_device_block(sdev, sas_device_priv_data);
}
}
@@ -2959,6 +2995,12 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
continue;
if (sas_device->pend_sas_rphy_add)
continue;
+ if (sas_device_priv_data->ignore_delay_remove) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip device_block for SES handle(0x%04x)\n",
+ __func__, sas_device_priv_data->sas_target->handle);
+ continue;
+ }
_scsih_internal_device_block(sdev, sas_device_priv_data);
}
@@ -3118,7 +3160,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
" slot(%d)\n", ioc->name, (unsigned long long)
sas_device->enclosure_logical_id,
sas_device->slot));
- if (sas_device->connector_name)
+ if (sas_device->connector_name[0] != '\0')
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"setting delete flag: enclosure level(0x%04x),"
" connector name( %s)\n", ioc->name,
@@ -3151,7 +3193,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3186,6 +3228,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
Mpi2SasIoUnitControlRequest_t *mpi_request;
u16 smid_sas_ctrl;
u32 ioc_state;
+ struct _sc_list *delayed_sc;
if (ioc->remove_host) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3228,9 +3271,16 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- return 1;
+ delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
+ if (!delayed_sc)
+ return _scsih_check_for_pending_tm(ioc, smid);
+ INIT_LIST_HEAD(&delayed_sc->list);
+ delayed_sc->handle = mpi_request_tm->DevHandle;
+ list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:sc:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return _scsih_check_for_pending_tm(ioc, smid);
}
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3281,7 +3331,7 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
}
- return 1;
+ return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}
/**
@@ -3332,7 +3382,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3388,6 +3438,142 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
return _scsih_check_for_pending_tm(ioc, smid);
}
+/**
+ * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @event: Event ID
+ * @event_context: used to track events uniquely
+ *
+ * Context: processed in interrupt context.
+ */
+static void
+_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
+ u32 event_context)
+{
+ Mpi2EventAckRequest_t *ack_request;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
+
+ /* Without releasing the smid just update the
+	 * callback index and reuse the same smid for
+ * processing this delayed request
+ */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+ ioc->name, le16_to_cpu(event), smid,
+ ioc->base_cb_idx));
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = event;
+ ack_request->EventContext = event_context;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ mpt3sas_base_put_smid_default(ioc, smid);
+}
+
+/**
+ * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
+ * sas_io_unit_ctrl messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Context: processed in interrupt context.
+ */
+static void
+_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 handle)
+{
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u32 ioc_state;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed\n",
+			ioc->name, __func__));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery\n",
+			ioc->name, __func__));
+ return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational\n",
+			ioc->name, __func__));
+ return;
+ }
+
+ /* Without releasing the smid just update the
+	 * callback index and reuse the same smid for
+ * processing this delayed request
+ */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, le16_to_cpu(handle), smid,
+ ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = handle;
+ mpt3sas_base_put_smid_default(ioc, smid);
+}
+
+/**
+ * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Context: Executed in interrupt context
+ *
+ * This will check delayed internal messages list, and process the
+ * next request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _sc_list *delayed_sc;
+ struct _event_ack_list *delayed_event_ack;
+
+ if (!list_empty(&ioc->delayed_event_ack_list)) {
+ delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
+ struct _event_ack_list, list);
+ _scsih_issue_delayed_event_ack(ioc, smid,
+ delayed_event_ack->Event, delayed_event_ack->EventContext);
+ list_del(&delayed_event_ack->list);
+ kfree(delayed_event_ack);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_sc_list)) {
+ delayed_sc = list_entry(ioc->delayed_sc_list.next,
+ struct _sc_list, list);
+ _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
+ delayed_sc->handle);
+ list_del(&delayed_sc->list);
+ kfree(delayed_sc);
+ return 0;
+ }
+ return 1;
+}
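
Taken together with the enqueue sites in _base_async_event() and
_scsih_tm_tr_complete(), the function above completes a defer-and-replay
scheme: when no smid is free the request is parked on a list, and each
completing internal command donates its just-freed smid to the oldest
parked entry. A stripped-down sketch of that shape (illustrative types, no
locking; the driver itself uses list_head FIFOs under scsi_lookup_lock):

struct deferred {
	struct deferred *next;
	unsigned short handle;		/* payload, e.g. a DevHandle */
};

static struct deferred *head, **tail = &head;

/* enqueue side: no smid could be allocated, park the request */
static void defer(struct deferred *d)
{
	d->next = NULL;
	*tail = d;
	tail = &d->next;
}

/* completion side: recycle the freed smid for the oldest parked request;
 * 0 tells the caller the smid was consumed, 1 that it should free it */
static int check_for_pending(unsigned short smid,
			     void (*issue)(unsigned short, unsigned short))
{
	struct deferred *d = head;

	if (!d)
		return 1;
	head = d->next;
	if (!head)
		tail = &head;
	issue(smid, d->handle);
	return 0;
}
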
/**
* _scsih_check_for_pending_tm - check for pending task management
@@ -4084,6 +4270,9 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
desc_ioc_state = "eedp app tag error";
break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
+ desc_ioc_state = "insufficient power";
+ break;
default:
desc_ioc_state = "unknown";
break;
@@ -4609,6 +4798,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
case MPI2_IOCSTATUS_INVALID_STATE:
case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
default:
scmd->result = DID_SOFT_ERROR << 16;
break;
@@ -8391,7 +8581,8 @@ static struct raid_function_template mpt3sas_raid_functions = {
* @pdev: PCI device struct
*
* return MPI2_VERSION for SAS 2.0 HBA devices,
- * MPI25_VERSION for SAS 3.0 HBA devices.
+ * MPI25_VERSION for SAS 3.0 HBA devices, and
+ * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices.
*/
u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
@@ -8423,6 +8614,17 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI25_MFGPAGE_DEVID_SAS3108_5:
case MPI25_MFGPAGE_DEVID_SAS3108_6:
return MPI25_VERSION;
+ case MPI26_MFGPAGE_DEVID_SAS3216:
+ case MPI26_MFGPAGE_DEVID_SAS3224:
+ case MPI26_MFGPAGE_DEVID_SAS3316_1:
+ case MPI26_MFGPAGE_DEVID_SAS3316_2:
+ case MPI26_MFGPAGE_DEVID_SAS3316_3:
+ case MPI26_MFGPAGE_DEVID_SAS3316_4:
+ case MPI26_MFGPAGE_DEVID_SAS3324_1:
+ case MPI26_MFGPAGE_DEVID_SAS3324_2:
+ case MPI26_MFGPAGE_DEVID_SAS3324_3:
+ case MPI26_MFGPAGE_DEVID_SAS3324_4:
+ return MPI26_VERSION;
}
return 0;
}
@@ -8456,7 +8658,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
* for other generation HBA's return with -ENODEV
*/
- if ((hbas_to_enumerate == 2) && (hba_mpi_version != MPI25_VERSION))
+ if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
+ || hba_mpi_version == MPI26_VERSION)))
return -ENODEV;
switch (hba_mpi_version) {
@@ -8478,6 +8681,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
break;
case MPI25_VERSION:
+ case MPI26_VERSION:
/* Use mpt3sas driver host template for SAS 3.0 HBA's */
shost = scsi_host_alloc(&mpt3sas_driver_template,
sizeof(struct MPT3SAS_ADAPTER));
@@ -8488,7 +8692,9 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->hba_mpi_version_belonged = hba_mpi_version;
ioc->id = mpt3_ids++;
sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
- if (pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION)
+ if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
+ pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
+ (ioc->hba_mpi_version_belonged == MPI26_VERSION))
ioc->msix96_vector = 1;
break;
default:
@@ -8533,6 +8739,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->raid_device_list);
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_sc_list);
+ INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
INIT_LIST_HEAD(&ioc->reply_queue_list);
@@ -8866,6 +9074,28 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Cutlass ~ 3216 and 3224 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Intruder ~ 3316 and 3324 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index ca36d7ea0964..6a84b82d71bb 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1418,7 +1418,6 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
unsigned long timeleft;
void *psge;
- u32 sgl_flags;
u8 issue_reset = 0;
void *data_out = NULL;
dma_addr_t data_out_dma;
@@ -1507,24 +1506,10 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
cpu_to_le16(sizeof(struct phy_error_log_request));
psge = &mpi_request->SGL;
- /* WRITE sgel first */
- sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
- MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
- sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- sizeof(struct phy_control_request), data_out_dma);
-
- /* incr sgel */
- psge += ioc->sge_size;
-
- /* READ sgel last */
- sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
- MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
- MPI2_SGE_FLAGS_END_OF_LIST);
- sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- sizeof(struct phy_control_reply), data_out_dma +
- sizeof(struct phy_control_request));
+ ioc->build_sg(ioc, psge, data_out_dma,
+ sizeof(struct phy_control_request),
+ data_out_dma + sizeof(struct phy_control_request),
+ sizeof(struct phy_control_reply));
dtransportprintk(ioc, pr_info(MPT3SAS_FMT
"phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
@@ -1615,7 +1600,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
SMP_PHY_CONTROL_LINK_RESET);
/* handle hba phys */
- memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request.Operation = hard_reset ?
MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 02360de6b7e0..39285070f3b5 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2629,7 +2629,7 @@ static void mvumi_shutdown(struct pci_dev *pdev)
mvumi_flush_cache(mhba);
}
-static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mvumi_hba *mhba = NULL;
@@ -2648,7 +2648,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int mvumi_resume(struct pci_dev *pdev)
+static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
{
int ret;
struct mvumi_hba *mhba = NULL;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index d8a2b5185f56..3b11aad03752 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -2006,9 +2006,8 @@ EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
*/
void osd_set_caps(struct osd_cdb *cdb, const void *caps)
{
- bool is_ver1 = true;
/* NOTE: They start at same address */
- memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
+ memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN);
}
bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index ee00e27ba396..f6ad579280d4 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -37,6 +37,7 @@ typedef struct {
unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */
unsigned int failed:1; /* Failure flag */
unsigned wanted:1; /* Parport sharing busy flag */
+ unsigned int dev_no; /* Device number */
wait_queue_head_t *waiting;
struct Scsi_Host *host;
struct list_head list;
@@ -985,15 +986,40 @@ static struct scsi_host_template ppa_template = {
static LIST_HEAD(ppa_hosts);
+/*
+ * Finds the first available device number that can be allotted to the
+ * new ppa device and returns the address of the previous node so that
+ * we can add to the tail and keep the list in ascending order.
+ */
+
+static inline ppa_struct *find_parent(void)
+{
+ ppa_struct *dev, *par = NULL;
+ unsigned int cnt = 0;
+
+ if (list_empty(&ppa_hosts))
+ return NULL;
+
+ list_for_each_entry(dev, &ppa_hosts, list) {
+ if (dev->dev_no != cnt)
+ return par;
+ cnt++;
+ par = dev;
+ }
+
+ return par;
+}
+
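
A worked example of the numbering scheme find_parent() implements, with a
hypothetical list state: given existing dev_no values 0, 1 and 3, the walk
stops at the node holding dev_no 1, so __ppa_attach() below assigns dev_no 2
and the list stays sorted. The same logic over a plain array:

#include <stdio.h>

/* returns the index of the node the new entry goes after, -1 for head */
static int find_slot(const int *dev_nos, int n)
{
	int cnt = 0, par = -1, i;

	for (i = 0; i < n; i++) {
		if (dev_nos[i] != cnt)		/* first gap in numbering */
			return par;
		cnt++;
		par = i;
	}
	return par;				/* no gap: append at tail */
}

int main(void)
{
	int nos[] = { 0, 1, 3 };
	int par = find_slot(nos, 3);

	printf("parent idx %d, new dev_no %d\n",
	       par, par < 0 ? 0 : nos[par] + 1);	/* idx 1, dev_no 2 */
	return 0;
}
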
static int __ppa_attach(struct parport *pb)
{
struct Scsi_Host *host;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
DEFINE_WAIT(wait);
- ppa_struct *dev;
+ ppa_struct *dev, *temp;
int ports;
int modes, ppb, ppb_hi;
int err = -ENOMEM;
+ struct pardev_cb ppa_cb;
dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL);
if (!dev)
@@ -1002,8 +1028,15 @@ static int __ppa_attach(struct parport *pb)
dev->mode = PPA_AUTODETECT;
dev->recon_tmo = PPA_RECON_TMO;
init_waitqueue_head(&waiting);
- dev->dev = parport_register_device(pb, "ppa", NULL, ppa_wakeup,
- NULL, 0, dev);
+ temp = find_parent();
+ if (temp)
+ dev->dev_no = temp->dev_no + 1;
+
+ memset(&ppa_cb, 0, sizeof(ppa_cb));
+ ppa_cb.private = dev;
+ ppa_cb.wakeup = ppa_wakeup;
+
+ dev->dev = parport_register_dev_model(pb, "ppa", &ppa_cb, dev->dev_no);
if (!dev->dev)
goto out;
@@ -1110,9 +1143,10 @@ static void ppa_detach(struct parport *pb)
}
static struct parport_driver ppa_driver = {
- .name = "ppa",
- .attach = ppa_attach,
- .detach = ppa_detach,
+ .name = "ppa",
+ .match_port = ppa_attach,
+ .detach = ppa_detach,
+ .devmodel = true,
};
static int __init ppa_driver_init(void)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6992ebc50c87..4dc06a13cab8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -272,8 +272,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
iter = (uint32_t *)buf;
chksum = 0;
- for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
- chksum += le32_to_cpu(*iter++);
+ for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
+ chksum += le32_to_cpu(*iter);
chksum = ~chksum + 1;
*iter = cpu_to_le32(chksum);
} else {
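
The loop above folds every little-endian word except the last into chksum
and then stores ~chksum + 1 (its two's-complement negative) in the final
word, so the unsigned 32-bit sum over the whole buffer wraps to zero. A
minimal check of that invariant, assuming a little-endian host so the le32
conversions are identities:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buf[4] = { 0x11111111, 0x22222222, 0x33333333, 0 };
	uint32_t chksum = 0, sum = 0;
	int i;

	for (i = 0; i < 3; i++)		/* all words but the last */
		chksum += buf[i];
	buf[3] = ~chksum + 1;		/* two's-complement negative */

	for (i = 0; i < 4; i++)
		sum += buf[i];		/* wraps modulo 2^32 */
	printf("sum = 0x%08x\n", (unsigned)sum);   /* sum = 0x00000000 */
	return 0;
}
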
@@ -562,6 +562,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr;
if (unlikely(pci_channel_offline(ha->pdev)))
return -EAGAIN;
@@ -569,9 +570,16 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EINVAL;
- if (IS_NOCACHE_VPD_TYPE(ha))
- ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+ if (IS_NOCACHE_VPD_TYPE(ha)) {
+ faddr = ha->flt_region_vpd << 2;
+
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_vpd_sec << 2;
+
+ ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
ha->vpd_size);
+ }
return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
@@ -1909,7 +1917,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_reset_active(vha))
goto done;
- stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
+ stats = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
if (stats == NULL) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
@@ -1957,7 +1966,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
do_div(pfc_host_stat->seconds_since_last_reset, HZ);
done_free:
- dma_pool_free(ha->s_dma_pool, stats, stats_dma);
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
+ stats, stats_dma);
done:
return pfc_host_stat;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c26acde797f0..392c147d5793 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2107,6 +2107,195 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
}
static int
+qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_flash_update_caps cap;
+
+ if (!(IS_QLA27XX(ha)))
+ return -EPERM;
+
+ memset(&cap, 0, sizeof(cap));
+ cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+ (uint64_t)ha->fw_attributes_ext[0] << 32 |
+ (uint64_t)ha->fw_attributes_h << 16 |
+ (uint64_t)ha->fw_attributes;
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
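
qla27xx_get_flash_upd_cap() above assembles one 64-bit capabilities word
from four 16-bit firmware attribute fields, most significant half first. A
minimal sketch with hypothetical field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* stand-ins for fw_attributes_ext[1], _ext[0], _h and the base */
	uint16_t ext1 = 0xAAAA, ext0 = 0xBBBB, hi = 0xCCCC, lo = 0xDDDD;

	uint64_t caps = (uint64_t)ext1 << 48 |
			(uint64_t)ext0 << 32 |
			(uint64_t)hi << 16 |
			(uint64_t)lo;

	printf("0x%016llx\n", (unsigned long long)caps);
	/* prints 0xaaaabbbbccccdddd */
	return 0;
}
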
+static int
+qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ uint64_t online_fw_attr = 0;
+ struct qla_flash_update_caps cap;
+
+ if (!(IS_QLA27XX(ha)))
+ return -EPERM;
+
+ memset(&cap, 0, sizeof(cap));
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
+
+ online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+ (uint64_t)ha->fw_attributes_ext[0] << 32 |
+ (uint64_t)ha->fw_attributes_h << 16 |
+ (uint64_t)ha->fw_attributes;
+
+ if (online_fw_attr != cap.capabilities) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_INVALID_PARAM;
+ return -EINVAL;
+ }
+
+ if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_INVALID_PARAM;
+ return -EINVAL;
+ }
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
+qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_bbcr_data bbcr;
+ uint16_t loop_id, topo, sw_cap;
+ uint8_t domain, area, al_pa, state;
+ int rval;
+
+ if (!(IS_QLA27XX(ha)))
+ return -EPERM;
+
+ memset(&bbcr, 0, sizeof(bbcr));
+
+ if (vha->flags.bbcr_enable)
+ bbcr.status = QLA_BBCR_STATUS_ENABLED;
+ else
+ bbcr.status = QLA_BBCR_STATUS_DISABLED;
+
+ if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
+ rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
+ &area, &domain, &topo, &sw_cap);
+ if (rval != QLA_SUCCESS) {
+ bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
+ bbcr.state = QLA_BBCR_STATE_OFFLINE;
+ bbcr.mbx1 = loop_id;
+ goto done;
+ }
+
+ state = (vha->bbcr >> 12) & 0x1;
+
+ if (state) {
+ bbcr.state = QLA_BBCR_STATE_OFFLINE;
+ bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
+ } else {
+ bbcr.state = QLA_BBCR_STATE_ONLINE;
+ bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
+ }
+
+ bbcr.configured_bbscn = vha->bbcr & 0xf;
+ }
+
+done:
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
+qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct link_statistics *stats = NULL;
+ dma_addr_t stats_dma;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ goto done;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto done;
+
+ if (qla2x00_reset_active(vha))
+ goto done;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ goto done;
+
+ stats = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
+ if (!stats) {
+ ql_log(ql_log_warn, vha, 0x70e2,
+ "Failed to allocate memory for stats.\n");
+ goto done;
+ }
+
+ memset(stats, 0, sizeof(struct link_statistics));
+
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+
+ if (rval != QLA_SUCCESS)
+ goto done_free;
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
+ (uint8_t *)stats, sizeof(struct link_statistics));
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, stats, sizeof(struct link_statistics));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics);
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+done_free:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
+ stats, stats_dma);
+done:
+ return rval;
+}
+
+static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -2161,6 +2350,18 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_SERDES_OP_EX:
return qla8044_serdes_op(bsg_job);
+ case QL_VND_GET_FLASH_UPDATE_CAPS:
+ return qla27xx_get_flash_upd_cap(bsg_job);
+
+ case QL_VND_SET_FLASH_UPDATE_CAPS:
+ return qla27xx_set_flash_upd_cap(bsg_job);
+
+ case QL_VND_GET_BBCR_DATA:
+ return qla27xx_get_bbcr_data(bsg_job);
+
+ case QL_VND_GET_PRIV_STATS:
+ return qla2x00_get_priv_stats(bsg_job);
+
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index d38f9efa56fa..c80192d45536 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -25,6 +25,10 @@
#define QL_VND_FX00_MGMT_CMD 0x12
#define QL_VND_SERDES_OP 0x13
#define QL_VND_SERDES_OP_EX 0x14
+#define QL_VND_GET_FLASH_UPDATE_CAPS 0x15
+#define QL_VND_SET_FLASH_UPDATE_CAPS 0x16
+#define QL_VND_GET_BBCR_DATA 0x17
+#define QL_VND_GET_PRIV_STATS 0x18
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -232,4 +236,34 @@ struct qla_serdes_reg_ex {
uint32_t val;
} __packed;
+struct qla_flash_update_caps {
+ uint64_t capabilities;
+ uint32_t outage_duration;
+ uint8_t reserved[20];
+} __packed;
+
+/* BB_CR Status */
+#define QLA_BBCR_STATUS_DISABLED 0
+#define QLA_BBCR_STATUS_ENABLED 1
+#define QLA_BBCR_STATUS_UNKNOWN 2
+
+/* BB_CR State */
+#define QLA_BBCR_STATE_OFFLINE 0
+#define QLA_BBCR_STATE_ONLINE 1
+
+/* BB_CR Offline Reason Code */
+#define QLA_BBCR_REASON_PORT_SPEED 1
+#define QLA_BBCR_REASON_PEER_PORT 2
+#define QLA_BBCR_REASON_SWITCH 3
+#define QLA_BBCR_REASON_LOGIN_REJECT 4
+
+struct qla_bbcr_data {
+	uint8_t status; /* 1 - enabled, 0 - disabled */
+ uint8_t state; /* 1 - online, 0 - offline */
+ uint8_t configured_bbscn; /* 0-15 */
+ uint8_t negotiated_bbscn; /* 0-15 */
+ uint8_t offline_reason_code;
+ uint16_t mbx1; /* Port state */
+ uint8_t reserved[9];
+} __packed;
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cd0d94ea7f74..b64c504ff12f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x017f | 0x0146 |
+ * | Module Init and Probe | 0x018f | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
* | Mailbox commands | 0x1192 | |
@@ -27,11 +27,12 @@
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
* | Async Events | 0x5089 | 0x502b-0x502f |
+ * | | | 0x505e |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
* | | | 0x507b,0x505f |
* | Timer Routines | 0x6012 | |
- * | User Space Interactions | 0x70e65 | 0x7018,0x702e |
+ * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
* | | | 0x7039,0x7045 |
* | | | 0x7073-0x7075 |
@@ -293,8 +294,8 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
WRT_REG_DWORD(&reg->iobase_addr, iobase);
dmp_reg = &reg->iobase_window;
- while (count--)
- *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for ( ; count--; dmp_reg++)
+ *buf++ = htonl(RD_REG_DWORD(dmp_reg));
return buf;
}
@@ -456,8 +457,8 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
{
uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
- while (count--)
- *buf++ = htons(RD_REG_WORD(dmp_reg++));
+ for ( ; count--; dmp_reg++)
+ *buf++ = htons(RD_REG_WORD(dmp_reg));
}
static inline void *
@@ -732,16 +733,18 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
dmp_reg = &reg->u.isp2300.req_q_in;
- for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
- fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+ cnt++, dmp_reg++)
+ fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
dmp_reg = &reg->u.isp2300.mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+ cnt++, dmp_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->ctrl_status, 0x40);
qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
@@ -751,8 +754,9 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
WRT_REG_WORD(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+ cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
@@ -895,25 +899,25 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
dmp_reg = &reg->u.isp2100.mailbox0;
- for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
if (cnt == 8)
dmp_reg = &reg->u_end.isp2200.mailbox8;
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
}
dmp_reg = &reg->u.isp2100.unused_2[0];
- for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
- fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
+ fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
@@ -1095,8 +1099,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1128,8 +1132,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1167,20 +1171,20 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1358,8 +1362,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
@@ -1368,8 +1374,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1417,8 +1423,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1481,20 +1487,20 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1679,8 +1685,10 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
@@ -1689,8 +1697,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1738,8 +1746,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1802,20 +1810,20 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -2022,8 +2030,10 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
@@ -2032,8 +2042,8 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -2081,8 +2091,8 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -2177,20 +2187,20 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9872f3429e53..ceb452dd143c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1060,6 +1060,12 @@ struct mbx_cmd_32 {
#define FSTATE_FATAL_ERROR 4
#define FSTATE_LOOP_BACK_CONN 5
+#define QLA27XX_IMG_STATUS_VER_MAJOR 0x01
+#define QLA27XX_IMG_STATUS_VER_MINOR 0x00
+#define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE
+#define QLA27XX_PRIMARY_IMAGE 1
+#define QLA27XX_SECONDARY_IMAGE 2
+
/*
* Port Database structure definition
* Little endian except where noted.
@@ -1248,13 +1254,41 @@ struct link_statistics {
uint32_t inval_xmit_word_cnt;
uint32_t inval_crc_cnt;
uint32_t lip_cnt;
- uint32_t unused1[0x1a];
+ uint32_t link_up_cnt;
+ uint32_t link_down_loop_init_tmo;
+ uint32_t link_down_los;
+ uint32_t link_down_loss_rcv_clk;
+ uint32_t reserved0[5];
+ uint32_t port_cfg_chg;
+ uint32_t reserved1[11];
+ uint32_t rsp_q_full;
+ uint32_t atio_q_full;
+ uint32_t drop_ae;
+ uint32_t els_proto_err;
+ uint32_t reserved2;
uint32_t tx_frames;
uint32_t rx_frames;
uint32_t discarded_frames;
uint32_t dropped_frames;
- uint32_t unused2[1];
+ uint32_t reserved3;
uint32_t nos_rcvd;
+ uint32_t reserved4[4];
+ uint32_t tx_prjt;
+ uint32_t rcv_exfail;
+ uint32_t rcv_abts;
+ uint32_t seq_frm_miss;
+ uint32_t corr_err;
+ uint32_t mb_rqst;
+ uint32_t nport_full;
+ uint32_t eofa;
+ uint32_t reserved5;
+ uint32_t fpm_recv_word_cnt_lo;
+ uint32_t fpm_recv_word_cnt_hi;
+ uint32_t fpm_disc_word_cnt_lo;
+ uint32_t fpm_disc_word_cnt_hi;
+ uint32_t fpm_xmit_word_cnt_lo;
+ uint32_t fpm_xmit_word_cnt_hi;
+ uint32_t reserved6[70];
};
/*
@@ -3433,14 +3467,20 @@ struct qla_hw_data {
uint32_t flt_region_flt;
uint32_t flt_region_fdt;
uint32_t flt_region_boot;
+ uint32_t flt_region_boot_sec;
uint32_t flt_region_fw;
+ uint32_t flt_region_fw_sec;
uint32_t flt_region_vpd_nvram;
uint32_t flt_region_vpd;
+ uint32_t flt_region_vpd_sec;
uint32_t flt_region_nvram;
uint32_t flt_region_npiv_conf;
uint32_t flt_region_gold_fw;
uint32_t flt_region_fcp_prio;
uint32_t flt_region_bootload;
+ uint32_t flt_region_img_status_pri;
+ uint32_t flt_region_img_status_sec;
+ uint8_t active_image;
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -3571,6 +3611,7 @@ typedef struct scsi_qla_host {
uint32_t delete_progress:1;
uint32_t fw_tgt_reported:1;
+ uint32_t bbcr_enable:1;
} flags;
atomic_t loop_state;
@@ -3703,8 +3744,19 @@ typedef struct scsi_qla_host {
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
struct qla_tgt_counters tgt_counters;
+ uint16_t bbcr;
} scsi_qla_host_t;
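+/* Flash "image status" region layout; see qla27xx_find_valid_image(). */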
+struct qla27xx_image_status {
+ uint8_t image_status_mask;
+ uint16_t generation_number;
+ uint8_t reserved[3];
+ uint8_t ver_minor;
+ uint8_t ver_major;
+ uint32_t checksum;
+ uint32_t signature;
+} __packed;
+
#define SET_VP_IDX 1
#define SET_AL_PA 2
#define RESET_VP_IDX 3
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 42bb357bf56b..4c0f3a774799 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1288,7 +1288,9 @@ struct vp_rpt_id_entry_24xx {
uint8_t vp_idx_map[16];
- uint8_t reserved_4[32];
+ uint8_t reserved_4[28];
+ uint16_t bbcr;
+ uint8_t reserved_5[6];
};
#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
@@ -1393,6 +1395,16 @@ struct qla_flt_header {
#define FLT_REG_FCOE_NVRAM_0 0xAA
#define FLT_REG_FCOE_NVRAM_1 0xAC
+/* 27xx */
+#define FLT_REG_IMG_PRI_27XX 0x95
+#define FLT_REG_IMG_SEC_27XX 0x96
+#define FLT_REG_FW_SEC_27XX 0x02
+#define FLT_REG_BOOTLOAD_SEC_27XX 0x09
+#define FLT_REG_VPD_SEC_27XX_0 0x50
+#define FLT_REG_VPD_SEC_27XX_1 0x52
+#define FLT_REG_VPD_SEC_27XX_2 0xD8
+#define FLT_REG_VPD_SEC_27XX_3 0xDA
+
struct qla_flt_region {
uint32_t code;
uint32_t size;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0103e468e357..fe943772fe7b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -90,6 +90,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
+extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
/*
* Global Data in qla_os.c source file.
@@ -121,6 +122,7 @@ extern int ql2xmdcapmask;
extern int ql2xmdenable;
extern int ql2xexlogins;
extern int ql2xexchoffld;
+extern int ql2xfwholdabts;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 692a7570b5e1..c56cdb35f3ed 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -157,8 +157,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
+ }
ql_dbg(ql_dbg_disc, vha, 0x2072,
"Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
@@ -2062,6 +2066,10 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
if (IS_P3P_TYPE(ha))
return;
+ /* Hold status IOCBs until ABTS response received. */
+ if (ql2xfwholdabts)
+ ha->fw_options[3] |= BIT_12;
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -2844,7 +2852,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (nv->login_timeout < 4)
nv->login_timeout = 4;
ha->login_timeout = nv->login_timeout;
- icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
@@ -5122,8 +5129,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
dptr = (uint32_t *)nv;
ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
ha->nvram_size);
- for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
- chksum += le32_to_cpu(*dptr++);
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+ chksum += le32_to_cpu(*dptr);
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
"Contents of NVRAM\n");
@@ -5274,7 +5281,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (le16_to_cpu(nv->login_timeout) < 4)
nv->login_timeout = cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
- icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
@@ -5346,6 +5352,93 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
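+/*
+ * Validate the primary and secondary image status regions and record
+ * the flash image to use in ha->active_image: 0 for the default image,
+ * QLA27XX_PRIMARY_IMAGE or QLA27XX_SECONDARY_IMAGE otherwise.
+ */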
+uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+{
+ struct qla27xx_image_status pri_image_status, sec_image_status;
+ uint8_t valid_pri_image, valid_sec_image;
+ uint32_t *wptr;
+ uint32_t cnt, chksum, size;
+ struct qla_hw_data *ha = vha->hw;
+
+ valid_pri_image = valid_sec_image = 1;
+ ha->active_image = 0;
+ size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
+
+ if (!ha->flt_region_img_status_pri) {
+ valid_pri_image = 0;
+ goto check_sec_image;
+ }
+
+ qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
+ ha->flt_region_img_status_pri, size);
+
+ if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Primary image signature (0x%x) not valid\n",
+ pri_image_status.signature);
+ valid_pri_image = 0;
+ goto check_sec_image;
+ }
+
+ wptr = (uint32_t *)(&pri_image_status);
+ cnt = size;
+
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le32_to_cpu(*wptr);
+ if (chksum) {
+ ql_dbg(ql_dbg_init, vha, 0x018c,
+ "Checksum validation failed for primary image (0x%x)\n",
+ chksum);
+ valid_pri_image = 0;
+ }
+
+check_sec_image:
+ if (!ha->flt_region_img_status_sec) {
+ valid_sec_image = 0;
+ goto check_valid_image;
+ }
+
+ qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+ ha->flt_region_img_status_sec, size);
+
+ if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+ ql_dbg(ql_dbg_init, vha, 0x018d,
+ "Secondary image signature(0x%x) not valid\n",
+ sec_image_status.signature);
+ valid_sec_image = 0;
+ goto check_valid_image;
+ }
+
+ wptr = (uint32_t *)(&sec_image_status);
+ cnt = size;
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le32_to_cpu(*wptr);
+ if (chksum) {
+ ql_dbg(ql_dbg_init, vha, 0x018e,
+ "Checksum validation failed for secondary image (0x%x)\n",
+ chksum);
+ valid_sec_image = 0;
+ }
+
+check_valid_image:
+ if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
+ ha->active_image = QLA27XX_PRIMARY_IMAGE;
+ if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
+ if (!ha->active_image ||
+ pri_image_status.generation_number <
+ sec_image_status.generation_number)
+ ha->active_image = QLA27XX_SECONDARY_IMAGE;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
+ ha->active_image == 0 ? "default bootld and fw" :
+ ha->active_image == 1 ? "primary" :
+ ha->active_image == 2 ? "secondary" :
+ "Invalid");
+
+ return ha->active_image;
+}
+
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
uint32_t faddr)
@@ -5368,6 +5461,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_fw_sec;
+
/* Validate firmware image by checking version. */
qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
for (i = 0; i < 4; i++)
@@ -6068,8 +6165,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
ha->nvram_size);
dptr = (uint32_t *)nv;
- for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
- chksum += le32_to_cpu(*dptr++);
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+ chksum += le32_to_cpu(*dptr);
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
"Contents of NVRAM:\n");
@@ -6231,7 +6328,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (le16_to_cpu(nv->login_timeout) < 4)
nv->login_timeout = cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
- icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
@@ -6413,12 +6509,17 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ /* Hold status IOCBs until ABTS response received. */
+ if (ql2xfwholdabts)
+ ha->fw_options[3] |= BIT_12;
+
if (!ql2xetsenable)
- return;
+ goto out;
/* Enable ETS Burst. */
memset(ha->fw_options, 0, sizeof(ha->fw_options));
ha->fw_options[2] |= BIT_9;
+out:
qla2x00_set_fw_options(vha, ha->fw_options);
}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index a6b7f1588aa4..edc48f3b8230 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -87,8 +87,8 @@ host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
__le32 *odest = (__le32 *) dst;
uint32_t iter = bsize >> 2;
- for (; iter ; iter--)
- *odest++ = cpu_to_le32(*isrc++);
+ for ( ; iter--; isrc++)
+ *odest++ = cpu_to_le32(*isrc);
}
static inline void
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 4af95479a9db..5649c200d37c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -934,10 +934,6 @@ skip_rio:
break;
global_port_update:
- /* Port unavailable. */
- ql_log(ql_log_warn, vha, 0x505e,
- "Link is offline.\n");
-
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 87e6758302f6..968b84613096 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1349,6 +1349,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
if (IS_FWI2_CAPABLE(vha->hw))
mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
+ if (IS_QLA27XX(vha->hw))
+ mcp->in_mb |= MBX_15;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1400,6 +1402,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
wwn_to_u64(vha->port_name));
}
}
+
+ if (IS_QLA27XX(vha->hw))
+ vha->bbcr = mcp->mb[15];
}
return rval;
@@ -2754,7 +2759,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *siter, *diter, dwords;
+ uint32_t *iter, dwords;
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
@@ -2794,10 +2799,11 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
/* Copy over data -- firmware data is LE. */
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
"Done %s.\n", __func__);
- dwords = offsetof(struct link_statistics, unused1) / 4;
- siter = diter = &stats->link_fail_cnt;
- while (dwords--)
- *diter++ = le32_to_cpu(*siter++);
+ dwords = offsetof(struct link_statistics,
+ link_up_cnt) / 4;
+ iter = &stats->link_fail_cnt;
+ for ( ; dwords--; iter++)
+ le32_to_cpus(iter);
}
} else {
/* Failed. */
@@ -2814,7 +2820,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *siter, *diter, dwords;
+ uint32_t *iter, dwords;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
@@ -2843,9 +2849,9 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
"Done %s.\n", __func__);
/* Copy over data -- firmware data is LE. */
dwords = sizeof(struct link_statistics) / 4;
- siter = diter = &stats->link_fail_cnt;
- while (dwords--)
- *diter++ = le32_to_cpu(*siter++);
+ iter = &stats->link_fail_cnt;
+ for ( ; dwords--; iter++)
+ le32_to_cpus(iter);
}
} else {
/* Failed. */
@@ -3612,6 +3618,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+ /* buffer to buffer credit flag */
+ vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0;
+
/* FA-WWN is only for physical port */
if (!vp_idx) {
void *wwpn = ha->init_cb->port_name;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f6c7ce35b542..7c0b60ca158f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -233,6 +233,13 @@ MODULE_PARM_DESC(ql2xexchoffld,
"Number of exchanges to offload. "
"0 (Default)- Disabled.");
+int ql2xfwholdabts = 0;
+module_param(ql2xfwholdabts, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xfwholdabts,
+ "Allow FW to hold status IOCB until ABTS rsp received. "
+ "0 (Default) Do not set fw option. "
+ "1 - Set fw option to hold ABTS.");
+
/*
* SCSI host template entry points
*/
@@ -2216,6 +2223,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2271:
@@ -2223,6 +2231,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2261:
@@ -2230,6 +2239,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3272ed5bbcc7..5e9392316425 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -610,8 +610,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
wptr = (uint16_t *)req->ring;
cnt = sizeof(struct qla_flt_location) >> 1;
- for (chksum = 0; cnt; cnt--)
- chksum += le16_to_cpu(*wptr++);
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_log(ql_log_fatal, vha, 0x0045,
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
@@ -702,8 +702,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
}
cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
- for (chksum = 0; cnt; cnt--)
- chksum += le16_to_cpu(*wptr++);
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_log(ql_log_fatal, vha, 0x0048,
"Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
@@ -846,6 +846,38 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
if (ha->port_no == 1)
ha->flt_region_nvram = start;
break;
+ case FLT_REG_IMG_PRI_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_img_status_pri = start;
+ break;
+ case FLT_REG_IMG_SEC_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_img_status_sec = start;
+ break;
+ case FLT_REG_FW_SEC_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_fw_sec = start;
+ break;
+ case FLT_REG_BOOTLOAD_SEC_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_boot_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_0:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_1:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_2:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_3:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
}
}
goto done;
@@ -898,9 +930,8 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
fdt->sig[3] != 'D')
goto no_flash_data;
- for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
- cnt++)
- chksum += le16_to_cpu(*wptr++);
+ for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_dbg(ql_dbg_init, vha, 0x004c,
"Inconsistent FDT detected:"
@@ -995,7 +1026,8 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
} else {
- ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr);
+ wptr++;
ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
}
ql_dbg(ql_dbg_init, vha, 0x004e,
@@ -1072,10 +1104,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, (uint8_t *)data,
ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
- cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
- sizeof(struct qla_npiv_entry)) >> 1;
- for (wptr = data, chksum = 0; cnt; cnt--)
- chksum += le16_to_cpu(*wptr++);
+ cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1;
+ for (wptr = data, chksum = 0; cnt--; wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_dbg(ql_dbg_user, vha, 0x7092,
"Inconsistent NPIV-Config "
@@ -2989,6 +3020,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
uint8_t code_type, last_image;
int i;
struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr = 0;
+
+ pcihdr = pcids = 0;
if (IS_P3P_TYPE(ha))
return ret;
@@ -3002,9 +3036,11 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
-
- /* Begin with first PCI expansion ROM header. */
pcihdr = ha->flt_region_boot << 2;
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ pcihdr = ha->flt_region_boot_sec << 2;
+
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
@@ -3077,8 +3113,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Read firmware image information. */
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
+ faddr = ha->flt_region_fw;
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_fw_sec;
- qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
+ qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 6d31faa8c57b..0bc93fa46dae 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.26-k"
+#define QLA2XXX_VERSION "8.07.00.33-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 5d4f8e67fb25..638f72c5ab05 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -46,11 +46,13 @@ int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
ret_val = qla4_83xx_set_win_base(ha, addr);
- if (ret_val == QLA_SUCCESS)
+ if (ret_val == QLA_SUCCESS) {
*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
- else
+ } else {
+ *data = 0xffffffff;
ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
__func__, addr);
+ }
return ret_val;
}
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 676385ff28ef..69bfc0a1aea3 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -710,7 +710,7 @@ static int qpti_map_regs(struct qlogicpti *qpti)
"PTI Qlogic/ISP");
if (!qpti->qregs) {
printk("PTI: Qlogic/ISP registers are unmappable\n");
- return -1;
+ return -ENODEV;
}
if (qpti->is_pti) {
qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
@@ -718,7 +718,7 @@ static int qpti_map_regs(struct qlogicpti *qpti)
"PTI Qlogic/ISP statreg");
if (!qpti->sreg) {
printk("PTI: Qlogic/ISP status register is unmappable\n");
- return -1;
+ return -ENODEV;
}
}
return 0;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index bbfbfd9e5aa3..3408578b08d6 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,6 +220,8 @@ static struct {
{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
+ {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
+ {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8c6e31874171..8106515d1df8 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2700,6 +2700,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ scsi_rescan_device(&sdev->sdev_gendev);
envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
break;
case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 459abe1dcc87..b44c1bb687a2 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -139,6 +139,16 @@ static int scsi_bus_resume_common(struct device *dev,
else
fn = NULL;
+ /*
+ * Forcibly set runtime PM status of request queue to "active" to
+ * make sure we can again get requests from the queue (see also
+ * blk_pm_peek_request()).
+ *
+ * The resume hook will correct runtime PM status of the disk.
+ */
+ if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
+ blk_set_runtime_active(to_scsi_device(dev)->request_queue);
+
if (fn) {
async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6a820668d442..97074c91e328 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -43,6 +43,7 @@
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dh.h>
#include <scsi/scsi_eh.h>
#include "scsi_priv.h"
@@ -518,7 +519,8 @@ void scsi_target_reap(struct scsi_target *starget)
}
/**
- * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
+ * scsi_sanitize_inquiry_string - remove non-graphical chars from an
+ * INQUIRY result string
* @s: INQUIRY result string to sanitize
* @len: length of the string
*
@@ -531,7 +533,7 @@ void scsi_target_reap(struct scsi_target *starget)
* string terminator, so all the following characters are set to
* spaces.
**/
-static void sanitize_inquiry_string(unsigned char *s, int len)
+void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
int terminated = 0;
@@ -542,6 +544,7 @@ static void sanitize_inquiry_string(unsigned char *s, int len)
*s = ' ';
}
}
+EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
/**
* scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
@@ -627,9 +630,9 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
if (result == 0) {
- sanitize_inquiry_string(&inq_result[8], 8);
- sanitize_inquiry_string(&inq_result[16], 16);
- sanitize_inquiry_string(&inq_result[32], 4);
+ scsi_sanitize_inquiry_string(&inq_result[8], 8);
+ scsi_sanitize_inquiry_string(&inq_result[16], 16);
+ scsi_sanitize_inquiry_string(&inq_result[32], 4);
response_len = inq_result[4] + 5;
if (response_len > 255)
@@ -962,6 +965,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
+ if (*bflags & BLIST_SYNC_ALUA)
+ sdev->synchronous_alua = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
@@ -1519,9 +1525,14 @@ EXPORT_SYMBOL(scsi_add_device);
void scsi_rescan_device(struct device *dev)
{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
device_lock(dev);
- scsi_attach_vpd(to_scsi_device(dev));
+ scsi_attach_vpd(sdev);
+
+ if (sdev->handler && sdev->handler->rescan)
+ sdev->handler->rescan(sdev);
if (dev->driver && try_module_get(dev->driver->owner)) {
struct scsi_driver *drv = to_scsi_driver(dev->driver);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 00bc7218a7f8..d16441961f3a 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -81,6 +81,33 @@ const char *scsi_host_state_name(enum scsi_host_state state)
return name;
}
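+/* Mapping of SCSI_ACCESS_STATE_* values to the names shown in sysfs. */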
+static const struct {
+ unsigned char value;
+ char *name;
+} sdev_access_states[] = {
+ { SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" },
+ { SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" },
+ { SCSI_ACCESS_STATE_STANDBY, "standby" },
+ { SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" },
+ { SCSI_ACCESS_STATE_LBA, "lba-dependent" },
+ { SCSI_ACCESS_STATE_OFFLINE, "offline" },
+ { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
+};
+
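+/* Translate a SCSI_ACCESS_STATE_* value to its sysfs name, or NULL. */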
+const char *scsi_access_state_name(unsigned char state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) {
+ if (sdev_access_states[i].value == state) {
+ name = sdev_access_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
static int check_set(unsigned long long *val, char *src)
{
char *last;
@@ -973,6 +1000,43 @@ sdev_store_dh_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state,
sdev_store_dh_state);
+
+static ssize_t
+sdev_show_access_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ unsigned char access_state;
+ const char *access_state_name;
+
+ if (!sdev->handler)
+ return -EINVAL;
+
+ access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK);
+ access_state_name = scsi_access_state_name(access_state);
+
+ return sprintf(buf, "%s\n",
+ access_state_name ? access_state_name : "unknown");
+}
+static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL);
+
+static ssize_t
+sdev_show_preferred_path(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->handler)
+ return -EINVAL;
+
+ if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL);
#endif
static ssize_t
@@ -1020,9 +1084,33 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
!sdev->host->hostt->change_queue_depth)
return 0;
+#ifdef CONFIG_SCSI_DH
+ if (attr == &dev_attr_access_state.attr &&
+ !sdev->handler)
+ return 0;
+ if (attr == &dev_attr_preferred_path.attr &&
+ !sdev->handler)
+ return 0;
+#endif
return attr->mode;
}
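+/*
+ * Expose the vpd_pg80/vpd_pg83 binary attributes only when the device
+ * reported the corresponding VPD page.
+ */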
+static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
+ return 0;
+
+ if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
+ return 0;
+
+ return S_IRUGO;
+}
+
/* Default template for device attributes. May NOT be modified */
static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_device_blocked.attr,
@@ -1047,6 +1135,8 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_wwid.attr,
#ifdef CONFIG_SCSI_DH
&dev_attr_dh_state.attr,
+ &dev_attr_access_state.attr,
+ &dev_attr_preferred_path.attr,
#endif
&dev_attr_queue_ramp_up_period.attr,
REF_EVT(media_change),
@@ -1068,6 +1158,7 @@ static struct attribute_group scsi_sdev_attr_group = {
.attrs = scsi_sdev_attrs,
.bin_attrs = scsi_sdev_bin_attrs,
.is_visible = scsi_sdev_attr_is_visible,
+ .is_bin_visible = scsi_sdev_bin_attr_is_visible,
};
static const struct attribute_group *scsi_sdev_attr_groups[] = {
@@ -1129,13 +1220,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
scsi_autopm_get_device(sdev);
- error = device_add(&sdev->sdev_gendev);
- if (error) {
- sdev_printk(KERN_INFO, sdev,
- "failed to add device: %d\n", error);
- return error;
- }
-
error = scsi_dh_add_device(sdev);
if (error)
/*
@@ -1144,6 +1228,14 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
sdev_printk(KERN_INFO, sdev,
"failed to add device handler: %d\n", error);
+ error = device_add(&sdev->sdev_gendev);
+ if (error) {
+ sdev_printk(KERN_INFO, sdev,
+ "failed to add device: %d\n", error);
+ scsi_dh_remove_device(sdev);
+ return error;
+ }
+
device_enable_async_suspend(&sdev->sdev_dev);
error = device_add(&sdev->sdev_dev);
if (error) {
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e4b3d8f4fd85..441481623fb9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -4308,6 +4308,8 @@ static const struct {
{ISCSI_PORT_SPEED_100MBPS, "100 Mbps" },
{ISCSI_PORT_SPEED_1GBPS, "1 Gbps" },
{ISCSI_PORT_SPEED_10GBPS, "10 Gbps" },
+ {ISCSI_PORT_SPEED_25GBPS, "25 Gbps" },
+ {ISCSI_PORT_SPEED_40GBPS, "40 Gbps" },
};
char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d749da765df1..5a5457ac9cdb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
*/
if (sdkp->lbprz) {
q->limits.discard_alignment = 0;
- q->limits.discard_granularity = 1;
+ q->limits.discard_granularity = logical_block_size;
} else {
q->limits.discard_alignment = sdkp->unmap_alignment *
logical_block_size;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5e820674432c..ae7d9bdf409c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
- if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
+ (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3b3b56f4a830..82ed99848378 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -176,8 +176,7 @@ static struct eisa_device_id sim710_eisa_ids[] = {
};
MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
-static __init int
-sim710_eisa_probe(struct device *dev)
+static int sim710_eisa_probe(struct device *dev)
{
struct eisa_device *edev = to_eisa_device(dev);
unsigned long io_addr = edev->base_addr;
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index aebe75320ed3..ab0e06b0b4ff 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -75,7 +75,7 @@ snic_ver_enc(const char *s)
continue;
}
- if (i > 4 || !isdigit(c))
+ if (i > 3 || !isdigit(c))
goto end;
v[i] = v[i] * 10 + (c - '0');
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 2e522951b619..607b0a505844 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
Michael Schaefer, J"org Weule, and Eric Youngdale.
- Copyright 1992 - 2010 Kai Makisara
+ Copyright 1992 - 2016 Kai Makisara
email Kai.Makisara@kolumbus.fi
Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
-static const char *verstr = "20101219";
+static const char *verstr = "20160209";
#include <linux/module.h>
@@ -3296,7 +3296,10 @@ static int switch_partition(struct scsi_tape *STp)
#define PP_OFF_RESERVED 7
#define PP_BIT_IDP 0x20
+#define PP_BIT_FDP 0x80
#define PP_MSK_PSUM_MB 0x10
+#define PP_MSK_PSUM_UNITS 0x18
+#define PP_MSK_POFM 0x04
/* Get the number of partitions on the tape. As a side effect reads the
mode page into the tape buffer. */
@@ -3322,6 +3325,29 @@ static int nbr_partitions(struct scsi_tape *STp)
}
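+/* Send FORMAT MEDIUM to the drive. The format argument is placed in
+   byte 2 of the CDB: 0 formats to a single partition, 1 partitions
+   according to the partition mode page. */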
+static int format_medium(struct scsi_tape *STp, int format)
+{
+ int result = 0;
+ int timeout = STp->long_timeout;
+ unsigned char scmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+
+ memset(scmd, 0, MAX_COMMAND_SIZE);
+ scmd[0] = FORMAT_UNIT;
+ scmd[2] = format;
+ if (STp->immediate) {
+ scmd[1] |= 1; /* Don't wait for completion */
+ timeout = STp->device->request_queue->rq_timeout;
+ }
+ DEBC_printk(STp, "Sending FORMAT MEDIUM\n");
+ SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
+ timeout, MAX_RETRIES, 1);
+ if (!SRpnt)
+ result = STp->buffer->syscall_result;
+ return result;
+}
+
+
/* Partition the tape into two partitions if size > 0 or one partition if
size == 0.
@@ -3340,11 +3366,16 @@ static int nbr_partitions(struct scsi_tape *STp)
and 10 when 1 partition is defined (information from Eric Lee Green). This is
is acceptable also to some other old drives and enforced if the first partition
size field is used for the first additional partition size.
+
+ For drives that advertise SCSI-3 or newer, use the SSC-3 methods.
*/
static int partition_tape(struct scsi_tape *STp, int size)
{
int result;
+ int target_partition;
+ bool scsi3 = STp->device->scsi_level >= SCSI_3, needs_format = false;
int pgo, psd_cnt, psdo;
+ int psum = PP_MSK_PSUM_MB, units = 0;
unsigned char *bp;
result = read_mode_page(STp, PART_PAGE, 0);
@@ -3352,6 +3383,12 @@ static int partition_tape(struct scsi_tape *STp, int size)
DEBC_printk(STp, "Can't read partition mode page.\n");
return result;
}
+ target_partition = 1;
+ if (size < 0) {
+ target_partition = 0;
+ size = -size;
+ }
+
/* The mode page is in the buffer. Let's modify it and write it. */
bp = (STp->buffer)->b_data;
pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH];
@@ -3359,9 +3396,52 @@ static int partition_tape(struct scsi_tape *STp, int size)
bp[pgo + MP_OFF_PAGE_LENGTH] + 2);
psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2;
+
+ if (scsi3) {
+ needs_format = (bp[pgo + PP_OFF_FLAGS] & PP_MSK_POFM) != 0;
+ if (needs_format && size == 0) {
+ /* No need to write the mode page when clearing
+ * partitioning
+ */
+ DEBC_printk(STp, "Formatting tape with one partition.\n");
+ result = format_medium(STp, 0);
+ goto out;
+ }
+ if (needs_format) /* Leave the old value for HP DATs claiming SCSI_3 */
+ psd_cnt = 2;
+ if ((bp[pgo + PP_OFF_FLAGS] & PP_MSK_PSUM_UNITS) == PP_MSK_PSUM_UNITS) {
+ /* Use units scaling for large partitions if the device
+ * suggests it and no precision lost. Required for IBM
+ * TS1140/50 drives that don't support MB units.
+ */
+ if (size >= 1000 && (size % 1000) == 0) {
+ size /= 1000;
+ psum = PP_MSK_PSUM_UNITS;
+ units = 9; /* GB */
+ }
+ }
+ /* Try it anyway if too large to specify in MB */
+ if (psum == PP_MSK_PSUM_MB && size >= 65534) {
+ size /= 1000;
+ psum = PP_MSK_PSUM_UNITS;
+ units = 9; /* GB */
+ }
+ }
+
+ if (size >= 65535 || /* Does not fit into two bytes */
+ (target_partition == 0 && psd_cnt < 2)) {
+ result = -EINVAL;
+ goto out;
+ }
+
psdo = pgo + PART_PAGE_FIXED_LENGTH;
- if (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS]) {
- bp[psdo] = bp[psdo + 1] = 0xff; /* Rest of the tape */
+ /* The second condition is for HP DDS which use only one partition size
+ * descriptor
+ */
+ if (target_partition > 0 &&
+ (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS] ||
+ bp[pgo + PP_OFF_MAX_ADD_PARTS] != 1)) {
+ bp[psdo] = bp[psdo + 1] = 0xff; /* Rest to partition 0 */
psdo += 2;
}
memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2);
@@ -3370,7 +3450,7 @@ static int partition_tape(struct scsi_tape *STp, int size)
psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS],
bp[pgo + PP_OFF_NBR_ADD_PARTS]);
- if (size <= 0) {
+ if (size == 0) {
bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0;
if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS])
bp[pgo + MP_OFF_PAGE_LENGTH] = 6;
@@ -3378,22 +3458,37 @@ static int partition_tape(struct scsi_tape *STp, int size)
} else {
bp[psdo] = (size >> 8) & 0xff;
bp[psdo + 1] = size & 0xff;
+ if (target_partition == 0)
+ bp[psdo + 2] = bp[psdo + 3] = 0xff;
bp[pgo + 3] = 1;
if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8)
bp[pgo + MP_OFF_PAGE_LENGTH] = 8;
- DEBC_printk(STp, "Formatting tape with two partitions "
- "(1 = %d MB).\n", size);
+ DEBC_printk(STp,
+ "Formatting tape with two partitions (%i = %d MB).\n",
+ target_partition, units > 0 ? size * 1000 : size);
}
bp[pgo + PP_OFF_PART_UNITS] = 0;
bp[pgo + PP_OFF_RESERVED] = 0;
- bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | PP_MSK_PSUM_MB;
+ if (size != 1 || units != 0) {
+ bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | psum |
+ (bp[pgo + PP_OFF_FLAGS] & 0x07);
+ bp[pgo + PP_OFF_PART_UNITS] = units;
+ } else
+ bp[pgo + PP_OFF_FLAGS] = PP_BIT_FDP |
+ (bp[pgo + PP_OFF_FLAGS] & 0x1f);
+ bp[pgo + MP_OFF_PAGE_LENGTH] = 6 + psd_cnt * 2;
result = write_mode_page(STp, PART_PAGE, 1);
+
+ if (!result && needs_format)
+ result = format_medium(STp, 1);
+
if (result) {
st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n");
result = (-EIO);
}
+out:
return result;
}
@@ -3570,8 +3665,13 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
retval = (-EINVAL);
goto out;
}
- if ((i = st_int_ioctl(STp, MTREW, 0)) < 0 ||
- (i = partition_tape(STp, mtc.mt_count)) < 0) {
+ i = do_load_unload(STp, file, 1);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ i = partition_tape(STp, mtc.mt_count);
+ if (i < 0) {
retval = i;
goto out;
}
@@ -3581,7 +3681,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
STp->ps[i].last_block_valid = 0;
}
STp->partition = STp->new_partition = 0;
- STp->nbr_partitions = 1; /* Bad guess ?-) */
+ STp->nbr_partitions = mtc.mt_count != 0 ? 2 : 1;
STps->drv_block = STps->drv_file = 0;
retval = 0;
goto out;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 2de28d7a0b04..5b23175a584c 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1,7 +1,7 @@
/*
* SuperTrak EX Series Storage Controller driver for Linux
*
- * Copyright (C) 2005-2009 Promise Technology Inc.
+ * Copyright (C) 2005-2015 Promise Technology Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -38,11 +38,11 @@
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "4.6.0000.4"
-#define ST_VER_MAJOR 4
-#define ST_VER_MINOR 6
-#define ST_OEM 0
-#define ST_BUILD_VER 4
+#define ST_DRIVER_VERSION "5.00.0000.01"
+#define ST_VER_MAJOR 5
+#define ST_VER_MINOR 00
+#define ST_OEM 0000
+#define ST_BUILD_VER 01
enum {
/* MU register offset */
@@ -84,6 +84,8 @@ enum {
MU_STATE_STARTED = 2,
MU_STATE_RESETTING = 3,
MU_STATE_FAILED = 4,
+ MU_STATE_STOP = 5,
+ MU_STATE_NOCONNECT = 6,
MU_MAX_DELAY = 120,
MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
@@ -165,6 +167,14 @@ enum {
ST_ADDITIONAL_MEM = 0x200000,
ST_ADDITIONAL_MEM_MIN = 0x80000,
+ PMIC_SHUTDOWN = 0x0D,
+ PMIC_REUMSE = 0x10,
+ ST_IGNORED = -1,
+ ST_NOTHANDLED = 7,
+ ST_S3 = 3,
+ ST_S4 = 4,
+ ST_S5 = 5,
+ ST_S6 = 6,
};
struct st_sgitem {
@@ -328,6 +338,7 @@ struct st_hba {
u16 rq_count;
u16 rq_size;
u16 sts_count;
+ u8 supports_pm;
};
struct st_card_info {
@@ -536,6 +547,27 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
readl(hba->mmio_base + YH2I_REQ); /* flush */
}
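+/* Complete all outstanding commands with the given host byte and
+ * release their ccbs; used on reset and on device removal.
+ */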
+static void return_abnormal_state(struct st_hba *hba, int status)
+{
+ struct st_ccb *ccb;
+ unsigned long flags;
+ u16 tag;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ for (tag = 0; tag < hba->host->can_queue; tag++) {
+ ccb = &hba->ccb[tag];
+ if (ccb->req == NULL)
+ continue;
+ ccb->req = NULL;
+ if (ccb->cmd) {
+ scsi_dma_unmap(ccb->cmd);
+ ccb->cmd->result = status << 16;
+ ccb->cmd->scsi_done(ccb->cmd);
+ ccb->cmd = NULL;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
static int
stex_slave_config(struct scsi_device *sdev)
{
@@ -559,8 +591,12 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
id = cmd->device->id;
lun = cmd->device->lun;
hba = (struct st_hba *) &host->hostdata[0];
-
- if (unlikely(hba->mu_status == MU_STATE_RESETTING))
+ if (hba->mu_status == MU_STATE_NOCONNECT) {
+ cmd->result = DID_NO_CONNECT << 16;
+ done(cmd);
+ return 0;
+ }
+ if (unlikely(hba->mu_status != MU_STATE_STARTED))
return SCSI_MLQUEUE_HOST_BUSY;
switch (cmd->cmnd[0]) {
@@ -1259,10 +1295,8 @@ static void stex_ss_reset(struct st_hba *hba)
static int stex_do_reset(struct st_hba *hba)
{
- struct st_ccb *ccb;
unsigned long flags;
unsigned int mu_status = MU_STATE_RESETTING;
- u16 tag;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->mu_status == MU_STATE_STARTING) {
@@ -1296,20 +1330,8 @@ static int stex_do_reset(struct st_hba *hba)
else if (hba->cardtype == st_yel)
stex_ss_reset(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
- for (tag = 0; tag < hba->host->can_queue; tag++) {
- ccb = &hba->ccb[tag];
- if (ccb->req == NULL)
- continue;
- ccb->req = NULL;
- if (ccb->cmd) {
- scsi_dma_unmap(ccb->cmd);
- ccb->cmd->result = DID_RESET << 16;
- ccb->cmd->scsi_done(ccb->cmd);
- ccb->cmd = NULL;
- }
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return_abnormal_state(hba, DID_RESET);
if (stex_handshake(hba) == 0)
return 0;
@@ -1560,6 +1582,25 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hba->cardtype = (unsigned int) id->driver_data;
ci = &stex_card_info[hba->cardtype];
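+ /*
+ * These st_yel subdevices do not implement the PMIC sleep commands;
+ * any other st_yel board gets supports_pm set below.
+ */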
+ switch (id->subdevice) {
+ case 0x4221:
+ case 0x4222:
+ case 0x4223:
+ case 0x4224:
+ case 0x4225:
+ case 0x4226:
+ case 0x4227:
+ case 0x4261:
+ case 0x4262:
+ case 0x4263:
+ case 0x4264:
+ case 0x4265:
+ break;
+ default:
+ if (hba->cardtype == st_yel)
+ hba->supports_pm = 1;
+ }
+
sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
if (hba->cardtype == st_yel)
sts_offset += (ci->sts_count+1) * sizeof(u32);
@@ -1685,7 +1726,7 @@ out_disable:
return err;
}
-static void stex_hba_stop(struct st_hba *hba)
+static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
{
struct req_msg *req;
struct st_msg_header *msg_h;
@@ -1694,6 +1735,15 @@ static void stex_hba_stop(struct st_hba *hba)
u16 tag = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
+
+ if (hba->cardtype == st_yel && hba->supports_pm == 1) {
+ if (st_sleep_mic == ST_NOTHANDLED) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return;
+ }
+ }
req = hba->alloc_rq(hba);
if (hba->cardtype == st_yel) {
msg_h = (struct st_msg_header *)req - 1;
@@ -1701,11 +1751,18 @@ static void stex_hba_stop(struct st_hba *hba)
} else
memset(req, 0, hba->rq_size);
- if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
+ if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel)
+ && st_sleep_mic == ST_IGNORED) {
req->cdb[0] = MGT_CMD;
req->cdb[1] = MGT_CMD_SIGNATURE;
req->cdb[2] = CTLR_CONFIG_CMD;
req->cdb[3] = CTLR_SHUTDOWN;
+ } else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) {
+ req->cdb[0] = MGT_CMD;
+ req->cdb[1] = MGT_CMD_SIGNATURE;
+ req->cdb[2] = CTLR_CONFIG_CMD;
+ req->cdb[3] = PMIC_SHUTDOWN;
+ req->cdb[4] = st_sleep_mic;
} else {
req->cdb[0] = CONTROLLER_CMD;
req->cdb[1] = CTLR_POWER_STATE_CHANGE;
@@ -1725,10 +1782,12 @@ static void stex_hba_stop(struct st_hba *hba)
while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
hba->ccb[tag].req_type = 0;
+ hba->mu_status = MU_STATE_STOP;
return;
}
msleep(1);
}
+ hba->mu_status = MU_STATE_STOP;
}
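
For reference, the three CDB layouts built by stex_hba_stop() after this change; values are taken from this patch, with the tail of the legacy branch elided as in the hunk above:

/*
 * shutdown (st_yosemite/st_yel, st_sleep_mic == ST_IGNORED):
 *   cdb[] = { MGT_CMD, MGT_CMD_SIGNATURE, CTLR_CONFIG_CMD, CTLR_SHUTDOWN }
 * PMIC sleep (st_yel, st_sleep_mic == ST_S3/ST_S4/ST_S5):
 *   cdb[] = { MGT_CMD, MGT_CMD_SIGNATURE, CTLR_CONFIG_CMD, PMIC_SHUTDOWN, st_sleep_mic }
 * other card types:
 *   cdb[] = { CONTROLLER_CMD, CTLR_POWER_STATE_CHANGE, ... }
 */
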
static void stex_hba_free(struct st_hba *hba)
@@ -1751,9 +1810,11 @@ static void stex_remove(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
+ hba->mu_status = MU_STATE_NOCONNECT;
+ return_abnormal_state(hba, DID_NO_CONNECT);
scsi_remove_host(hba->host);
- stex_hba_stop(hba);
+ scsi_block_requests(hba->host);
stex_hba_free(hba);
@@ -1766,9 +1827,43 @@ static void stex_shutdown(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
- stex_hba_stop(hba);
+ if (hba->supports_pm == 0)
+ stex_hba_stop(hba, ST_IGNORED);
+ else
+ stex_hba_stop(hba, ST_S5);
+}
+
+static int stex_choice_sleep_mic(pm_message_t state)
+{
+ switch (state.event) {
+ case PM_EVENT_SUSPEND:
+ return ST_S3;
+ case PM_EVENT_HIBERNATE:
+ return ST_S4;
+ default:
+ return ST_NOTHANDLED;
+ }
+}
+
+static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct st_hba *hba = pci_get_drvdata(pdev);
+
+ if (hba->cardtype == st_yel && hba->supports_pm == 1)
+ stex_hba_stop(hba, stex_choice_sleep_mic(state));
+ else
+ stex_hba_stop(hba, ST_IGNORED);
+ return 0;
}
+static int stex_resume(struct pci_dev *pdev)
+{
+ struct st_hba *hba = pci_get_drvdata(pdev);
+
+ hba->mu_status = MU_STATE_STARTING;
+ stex_handshake(hba);
+ return 0;
+}
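
A minimal sketch of how these legacy PCI PM callbacks are exercised, assuming the PCI core passes PMSG_SUSPEND and PMSG_HIBERNATE (whose .event fields are PM_EVENT_SUSPEND and PM_EVENT_HIBERNATE):

stex_suspend(pdev, PMSG_SUSPEND);   /* stex_hba_stop(hba, ST_S3) on PM-capable st_yel */
stex_suspend(pdev, PMSG_HIBERNATE); /* stex_hba_stop(hba, ST_S4) on PM-capable st_yel */
/* other events yield ST_NOTHANDLED, making stex_hba_stop() return without sending a command */
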
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
static struct pci_driver stex_pci_driver = {
@@ -1777,6 +1872,8 @@ static struct pci_driver stex_pci_driver = {
.probe = stex_probe,
.remove = stex_remove,
.shutdown = stex_shutdown,
+ .suspend = stex_suspend,
+ .resume = stex_resume,
};
static int __init stex_init(void)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 292c04eec9ad..3ddcabb790a8 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
- case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
- if ((asc == 0x2a) && (ascq == 0x9)) {
+ case SRB_STATUS_ABORTED:
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ (asc == 0x2a) && (ascq == 0x9)) {
do_work = true;
process_err_fn = storvsc_device_scan;
/*
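
A sketch of the failure mode being fixed, assuming the switch operand has the flag bits masked off (as this driver's SRB_STATUS() helper does): a case label that ORs in a flag bit can then never match, so the base status must be matched and the flag tested on the raw value:

/* Sketch: srb_status is a base status OR'd with flag bits. */
switch (SRB_STATUS(vm_srb->srb_status)) {  /* flag bits masked off */
case SRB_STATUS_ABORTED:  /* matches with or without autosense */
	if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
		/* sense data valid, e.g. capacity change (asc 0x2a, ascq 0x9) */;
	break;
}
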
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 42c459a9d3fe..54a16cef0367 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -170,7 +170,7 @@ enum ufs_desc_max_size {
* of descriptor header.
*/
QUERY_DESC_STRING_MAX_SIZE = 0xFE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
+ QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
QUERY_DESC_POWER_MAX_SIZE = 0x62,
QUERY_DESC_RFU_MAX_SIZE = 0x00,
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 85cd2564c157..9c1b94bef8f3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.c
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -58,13 +58,25 @@
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
+/*
+ * Query request timeout for fDeviceInit flag
+ * fDeviceInit query response time for some devices is so large that the
+ * default QUERY_REQ_TIMEOUT may not be enough for such devices.
+ */
+#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
@@ -92,7 +104,7 @@ static u32 ufs_query_desc_max_size[] = {
QUERY_DESC_INTERCONNECT_MAX_SIZE,
QUERY_DESC_STRING_MAX_SIZE,
QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE,
+ QUERY_DESC_GEOMETRY_MAX_SIZE,
QUERY_DESC_POWER_MAX_SIZE,
QUERY_DESC_RFU_MAX_SIZE,
};
@@ -190,6 +202,10 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
+{
+ return tag >= 0 && tag < hba->nutrs;
+}
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
@@ -360,6 +376,16 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
}
/**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+ __clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
@@ -374,11 +400,9 @@ static inline int ufshcd_get_lists_status(u32 reg)
* 1 UTRLRDY
* 2 UTMRLRDY
* 3 UCRDY
- * 4 HEI
- * 5 DEI
- * 6-7 reserved
+ * 4-7 reserved
*/
- return (((reg) & (0xFF)) >> 1) ^ (0x07);
+ return ((reg & 0xFF) >> 1) ^ 0x07;
}
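
A worked example of the return value, using the bit layout from the comment above:

/*
 * With UTRLRDY (bit 1), UTMRLRDY (bit 2) and UCRDY (bit 3) all set:
 *   reg & 0xFF  == 0x0e
 *   0x0e >> 1   == 0x07
 *   0x07 ^ 0x07 == 0
 * so the function returns 0 exactly when all three ready bits are set;
 * any missing bit leaves a non-zero result.
 */
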
/**
@@ -582,6 +606,11 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+ }
+
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
@@ -697,7 +726,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done)
+ || hba->active_uic_cmd || hba->uic_async_done
+ || ufshcd_eh_in_progress(hba))
return;
hba->clk_gating.state = REQ_CLKS_OFF;
@@ -953,13 +983,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
*
* Identical to ufshcd_send_uic_cmd() except that it must be called
* with the mutex held and the host_lock locked.
* Returns 0 only on success.
*/
static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ bool completion)
{
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
@@ -967,7 +999,8 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
return -EIO;
}
- init_completion(&uic_cmd->done);
+ if (completion)
+ init_completion(&uic_cmd->done);
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
@@ -992,7 +1025,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -1035,6 +1068,7 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
cpu_to_le32(lower_32_bits(sg->dma_address));
prd_table[i].upper_addr =
cpu_to_le32(upper_32_bits(sg->dma_address));
+ prd_table[i].reserved = 0;
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@ -1117,7 +1151,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
/* Transfer request descriptor header fields */
req_desc->header.dword_0 = cpu_to_le32(dword_0);
-
+ /* dword_1 is reserved, hence it is set to 0 */
+ req_desc->header.dword_1 = 0;
/*
* assigning invalid value for command status. Controller
* updates OCS on command completion, with the command
@@ -1125,6 +1160,10 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
*/
req_desc->header.dword_2 =
cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ /* dword_3 is reserved, hence it is set to 0 */
+ req_desc->header.dword_3 = 0;
+
+ req_desc->prd_table_length = 0;
}
/**
@@ -1137,6 +1176,7 @@ static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ unsigned short cdb_len;
/* command descriptor fields */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -1151,8 +1191,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
ucd_req_ptr->sc.exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->sdb.length);
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
- (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -1189,6 +1232,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(descp, query->descriptor, len);
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -1201,6 +1245,11 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
ucd_req_ptr->header.dword_0 =
UPIU_HEADER_DWORD(
UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+ /* clear the rest of the basic header fields */
+ ucd_req_ptr->header.dword_1 = 0;
+ ucd_req_ptr->header.dword_2 = 0;
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -1293,6 +1342,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba = shost_priv(host);
tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
spin_lock_irqsave(hba->host->host_lock, flags);
switch (hba->ufshcd_state) {
@@ -1312,6 +1367,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
cmd->scsi_done(cmd);
goto out_unlock;
}
+
+ /* if error handling is in progress, don't issue commands */
+ if (ufshcd_eh_in_progress(hba)) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* acquire the tag to make sure device cmds don't use it */
@@ -1475,9 +1537,17 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
if (!time_left) {
err = -ETIMEDOUT;
+ dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+ __func__, lrbp->task_tag);
if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
- /* sucessfully cleared the command, retry if needed */
+ /* successfully cleared the command, retry if needed */
err = -EAGAIN;
+ /*
+ * In case of an error, after clearing the doorbell,
+ * we also need to clear the outstanding_reqs
+ * field in hba.
+ */
+ ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
}
return err;
@@ -1555,6 +1625,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1591,6 +1663,29 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.selector = selector;
}
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+ ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ if (ret)
+ dev_dbg(hba->dev,
+ "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ __func__, opcode, idn, ret, retries);
+ return ret;
+}
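
Usage appears later in this diff: ufshcd_complete_dev_init() replaces its open-coded retry loop with a single call to the wrapper:

err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, NULL);
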
+
/**
* ufshcd_query_flag() - API function for sending flag query requests
* hba: per-adapter instance
@@ -1600,12 +1695,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, index = 0, selector = 0;
+ int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
@@ -1638,7 +1734,10 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
+ timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
dev_err(hba->dev,
@@ -1722,6 +1821,43 @@ out:
}
/**
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val)
+{
+ int ret = 0;
+ u32 retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ ret = ufshcd_query_attr(hba, opcode, idn, index,
+ selector, attr_val);
+ if (ret)
+ dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+ __func__, idn, ret, QUERY_REQ_RETRIES);
+ return ret;
+}
+
+/**
* ufshcd_query_descriptor - API function for sending descriptor requests
* hba: per-adapter instance
* opcode: attribute opcode
@@ -2128,6 +2264,7 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
};
const char *set = action[!!peer];
int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
@@ -2135,10 +2272,18 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
uic_cmd.argument3 = mib_val;
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
+ if (!retries)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ retries);
return ret;
}
@@ -2163,6 +2308,7 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
};
const char *get = action[!!peer];
int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
struct ufs_pa_layer_attr orig_pwr_info;
struct ufs_pa_layer_attr temp_pwr_info;
bool pwr_mode_change = false;
@@ -2193,14 +2339,19 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
uic_cmd.argument1 = attr_sel;
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret) {
- dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
- get, UIC_GET_ATTR_ID(attr_sel), ret);
- goto out;
- }
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
- if (mib_val)
+ if (!retries)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel), retries);
+
+ if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@ -2233,6 +2384,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
+ bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
init_completion(&uic_async_done);
@@ -2240,15 +2392,17 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->uic_async_done = &uic_async_done;
- ret = __ufshcd_send_uic_cmd(hba, cmd);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (ret) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
- cmd->command, cmd->argument3, ret);
- goto out;
+ if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+ /*
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+ wmb();
+ reenable_intr = true;
}
- ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -2274,7 +2428,10 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
}
out:
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
+ if (reenable_intr)
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
@@ -2315,13 +2472,65 @@ out:
return ret;
}
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_link_recovery(struct ufs_hba *hba)
{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ret = ufshcd_host_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ dev_err(hba->dev, "%s: link recovery failed, err %d",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret;
struct uic_command uic_cmd = {0};
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+ __func__, ret);
+
+ /*
+ * If link recovery fails then return an error so that the
+ * caller doesn't retry hibern8 enter again.
+ */
+ if (ufshcd_link_recovery(hba))
+ ret = -ENOLINK;
+ }
- return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret = 0, retries;
+
+ for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+ ret = __ufshcd_uic_hibern8_enter(hba);
+ if (!ret || ret == -ENOLINK)
+ goto out;
+ }
+out:
+ return ret;
}
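
The resulting retry behavior of the two functions above, summarized:

/*
 * Each failed enter attempt triggers ufshcd_link_recovery() inside
 * __ufshcd_uic_hibern8_enter(); a successful recovery lets the loop
 * retry (up to UIC_HIBERN8_ENTER_RETRIES attempts), while a failed
 * recovery surfaces as -ENOLINK and stops the loop immediately,
 * since the link is down anyway.
 */
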
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
@@ -2332,8 +2541,9 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
if (ret) {
- ufshcd_set_link_off(hba);
- ret = ufshcd_host_reset_and_restore(hba);
+ dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
+ __func__, ret);
+ ret = ufshcd_link_recovery(hba);
}
return ret;
@@ -2513,17 +2723,12 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
- int i, retries, err = 0;
+ int i;
+ int err;
bool flag_res = 1;
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- /* Set the fDeviceInit flag */
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
- if (!err || err == -ETIMEDOUT)
- break;
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
- }
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
if (err) {
dev_err(hba->dev,
"%s setting fDeviceInit flag failed with error %d\n",
@@ -2531,18 +2736,11 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
goto out;
}
- /* poll for max. 100 iterations for fDeviceInit flag to clear */
- for (i = 0; i < 100 && !err && flag_res; i++) {
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- err = ufshcd_query_flag(hba,
- UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
- if (!err || err == -ETIMEDOUT)
- break;
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
- err);
- }
- }
+ /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 1000 && !err && flag_res; i++)
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+
if (err)
dev_err(hba->dev,
"%s reading fDeviceInit flag failed with error %d\n",
@@ -2563,7 +2761,7 @@ out:
* To bring UFS host controller to operational state,
* 1. Enable required interrupts
* 2. Configure interrupt aggregation
- * 3. Program UTRL and UTMRL base addres
+ * 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
* Returns 0 on success, non-zero value on failure
@@ -2593,8 +2791,13 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
+ * Make sure base address and interrupt setup are updated before
+ * enabling the run/stop registers below.
+ */
+ wmb();
+
+ /*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
- * DEI, HEI bits must be 0
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
if (!(ufshcd_get_lists_status(reg))) {
@@ -3090,7 +3293,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
scsi_status = result & MASK_SCSI_STATUS;
result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
- if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ /*
+ * Currently we only support BKOPs exception events,
+ * so we can ignore them during the power management
+ * callbacks. A BKOPs exception event is not expected
+ * to be raised in the runtime suspend callback, as
+ * that path allows urgent bkops. During system
+ * suspend we forcefully disable bkops anyway, and if
+ * urgent bkops is needed it will be enabled on system
+ * resume. A long-term solution could be to abort the
+ * system suspend if the UFS device needs urgent BKOPs.
+ */
+ if (!hba->pm_op_in_progress &&
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
schedule_work(&hba->eeh_work);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -3222,7 +3438,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
val = hba->ee_ctrl_mask & ~mask;
val &= 0xFFFF; /* 2 bytes */
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
hba->ee_ctrl_mask &= ~mask;
@@ -3250,7 +3466,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
val = hba->ee_ctrl_mask | mask;
val &= 0xFFFF; /* 2 bytes */
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
hba->ee_ctrl_mask |= mask;
@@ -3276,7 +3492,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
if (hba->auto_bkops_enabled)
goto out;
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
@@ -3325,7 +3541,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
goto out;
}
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
@@ -3356,7 +3572,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
- return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
@@ -3419,7 +3635,7 @@ static int ufshcd_urgent_bkops(struct ufs_hba *hba)
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
- return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
@@ -3645,16 +3861,20 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
- u32 intr_status;
+ u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status) {
+ if (intr_status)
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- ufshcd_sl_intr(hba, intr_status);
+
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
retval = IRQ_HANDLED;
}
spin_unlock(hba->host->host_lock);
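
A worked example with assumed register values shows how this cooperates with the power-mode path above, which temporarily clears UIC_COMMAND_COMPL in REG_INTERRUPT_ENABLE:

/*
 * Assumed values, for illustration only:
 *   REG_INTERRUPT_STATUS = UIC_COMMAND_COMPL (raised by the controller)
 *   REG_INTERRUPT_ENABLE has that bit cleared by ufshcd_uic_pwr_ctrl()
 *   enabled_intr_status  = 0
 * The disabled bit is still acknowledged by the write to
 * REG_INTERRUPT_STATUS but is never passed to ufshcd_sl_intr(),
 * so ufshcd_uic_pwr_ctrl() can wait for the power-mode event
 * without the ISR consuming the UIC command completion.
 */
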
@@ -3740,6 +3960,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
+
+ /* Make sure descriptors are ready before ringing the task doorbell */
+ wmb();
+
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
spin_unlock_irqrestore(host->host_lock, flags);
@@ -3845,13 +4069,23 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
host = cmd->device->host;
hba = shost_priv(host);
tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
ufshcd_hold(hba, false);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return SUCCESS */
- if (!(test_bit(tag, &hba->outstanding_reqs)))
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
goto out;
+ }
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!(reg & (1 << tag))) {
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
@@ -3905,7 +4139,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
- __clear_bit(tag, &hba->outstanding_reqs);
+ ufshcd_outstanding_req_clear(hba, tag);
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
@@ -4155,9 +4389,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4262,7 +4496,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
hba->wlun_dev_clr_ua = true;
if (ufshcd_get_max_pwr_mode(hba)) {
@@ -4276,6 +4509,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__, ret);
}
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -4285,8 +4520,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* clear any previous UFS device information */
memset(&hba->dev_info, 0, sizeof(hba->dev_info));
- if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
hba->dev_info.f_power_on_wp_en = flag;
if (!hba->is_init_prefetch)
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2570d9477b37..e3931d0c94eb 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -3,6 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.h
* Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -177,7 +178,7 @@ struct ufshcd_lrb {
};
/**
- * struct ufs_query - holds relevent data structures for query request
+ * struct ufs_query - holds relevant data structures for query request
* @request: request upiu and function
* @descriptor: buffer for sending/receiving descriptor
* @response: response upiu and response
@@ -681,6 +682,9 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
+/* Expose Query-Request API */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);