-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 9
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_diag.h | 6
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 84
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 11
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 76
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 19
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 131
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 16
-rw-r--r--  drivers/scsi/BusLogic.c | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 1
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 13
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 4
-rw-r--r--  drivers/scsi/aacraid/linit.c | 16
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 18
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 19
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.c | 14
-rw-r--r--  drivers/scsi/bfa/bfa_core.c | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_ct.c | 4
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c | 7
-rw-r--r--  drivers/scsi/bfa/bfad.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 4
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 1
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 18
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 7
-rw-r--r--  drivers/scsi/cxlflash/main.c | 1
-rw-r--r--  drivers/scsi/dpt_i2o.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 6
-rw-r--r--  drivers/scsi/fnic/vnic_dev.c | 12
-rw-r--r--  drivers/scsi/fnic/vnic_wq.c | 4
-rw-r--r--  drivers/scsi/gdth.c | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 5
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 13
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 17
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 26
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 4
-rw-r--r--  drivers/scsi/ipr.c | 5
-rw-r--r--  drivers/scsi/isci/isci.h | 6
-rw-r--r--  drivers/scsi/libiscsi.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 108
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 82
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 37
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 45
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 10
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 12
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 81
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 6
-rw-r--r--  drivers/scsi/mpt3sas/Makefile | 3
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 266
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 21
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_debugfs.c | 157
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 6
-rw-r--r--  drivers/scsi/pmcraid.c | 4
-rw-r--r--  drivers/scsi/qedf/qedf.h | 6
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 10
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 48
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 135
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 21
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 22
-rw-r--r--  drivers/scsi/qla1280.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 40
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 866
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 443
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 728
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 768
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 380
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 140
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 287
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 123
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 120
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h | 32
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 64
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 208
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 36
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 133
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 323
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 111
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 232
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 140
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 16
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 2048
-rw-r--r--  drivers/scsi/scsi_error.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 230
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 68
-rw-r--r--  drivers/scsi/sd.c | 19
-rw-r--r--  drivers/scsi/sgiwd93.c | 2
-rw-r--r--  drivers/scsi/snic/snic.h | 2
-rw-r--r--  drivers/scsi/snic/snic_ctl.c | 5
-rw-r--r--  drivers/scsi/sr.c | 26
-rw-r--r--  drivers/scsi/st.c | 5
-rw-r--r--  drivers/scsi/ufs/ti-j721e-ufs.c | 13
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 30
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 10
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 61
-rw-r--r--  drivers/scsi/ufs/ufs.h | 43
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h | 7
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 515
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 45
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 30
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 36
-rw-r--r--  drivers/target/target_core_alua.c | 10
-rw-r--r--  drivers/target/target_core_configfs.c | 82
-rw-r--r--  drivers/target/target_core_device.c | 13
-rw-r--r--  drivers/target/target_core_pr.c | 2
-rw-r--r--  drivers/target/target_core_pscsi.c | 6
-rw-r--r--  drivers/target/target_core_tpg.c | 3
-rw-r--r--  drivers/target/target_core_transport.c | 6
-rw-r--r--  drivers/target/target_core_user.c | 177
-rw-r--r--  drivers/vhost/scsi.c | 1
-rw-r--r--  include/linux/qed/qed_if.h | 1
-rw-r--r--  include/scsi/sas.h | 8
-rw-r--r--  include/scsi/scsi_cmnd.h | 1
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 10
-rw-r--r--  include/target/target_core_backend.h | 4
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--  include/trace/events/qla.h | 7
149 files changed, 6652 insertions, 3967 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index ad9feba938c3..cde8c2d74cda 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10051,7 +10051,7 @@ F: drivers/hid/hid-lg-g15.c
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
-M: Chaitra P B <chaitra.basappa@broadcom.com>
+M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
M: Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
L: MPT-FusionLinux.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c2dd322691d1..68aea22f2b89 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5052,9 +5052,11 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @persist_opcode: see below
*
- * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
- * devices not currently present.
- * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear al persist TargetID mappings
+ * =============================== ======================================
+ * MPI_SAS_OP_CLEAR_NOT_PRESENT Free all persist TargetID mappings for
+ * devices not currently present.
+ * MPI_SAS_OP_CLEAR_ALL_PERSISTENT Clear all persist TargetID mappings
+ * =============================== ======================================
*
* NOTE: Don't use this function during interrupt time.
*
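Both opcodes are consumed by mptbase_sas_persist_operation() in this file. A minimal caller sketch (the wrapper and its error reporting are illustrative assumptions, not part of this patch; per the NOTE above it must run in process context):

	static int example_clear_stale_mappings(MPT_ADAPTER *ioc)
	{
		int rc;

		rc = mptbase_sas_persist_operation(ioc,
						   MPI_SAS_OP_CLEAR_NOT_PRESENT);
		if (rc)
			printk(KERN_ERR "persist clear failed (%d)\n", rc);
		return rc;
	}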
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 66ed39d6f357..a49743d56b9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -1014,6 +1014,7 @@ int qed_device_num_ports(struct qed_dev *cdev);
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 83e798d4eebb..11367a248d55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1949,6 +1949,15 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
op->link_update(cookie, &if_link);
}
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+
+ if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
+ op->bw_update(cookie);
+}
+
static int qed_drain(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
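qed_bw_update() mirrors the existing qed_link_update() dispatcher: it forwards the firmware's bandwidth-reallocation notification to whichever protocol driver registered a struct qed_common_cb_ops against the lead hwfn. A sketch of the consumer side, assuming the new bw_update member has the void (*)(void *dev) shape implied by op->bw_update(cookie) above (my_ctx and my_bw_update are hypothetical names):

	static void my_bw_update(void *dev)
	{
		struct my_ctx *ctx = dev;	/* the ops_cookie set at registration */

		/* re-read the per-function bandwidth limits for ctx here */
	}

	static struct qed_common_cb_ops my_cb_ops = {
		.bw_update = my_bw_update,
	};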
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 09ec846fe01d..18b713a616de 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
/*
@@ -415,8 +415,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
- if (!zfcp_scsi_adapter_register(adapter))
- return adapter;
+ return adapter;
failed:
zfcp_adapter_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index b9c93d15f67c..3852367f15f6 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -4,7 +4,7 @@
*
* Definitions for handling diagnostics in the zfcp device driver.
*
- * Copyright IBM Corp. 2018
+ * Copyright IBM Corp. 2018, 2020
*/
#ifndef ZFCP_DIAG_H
@@ -56,11 +56,11 @@ struct zfcp_diag_adapter {
unsigned long max_age;
- struct {
+ struct zfcp_diag_adapter_port_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_port data;
} port_data;
- struct {
+ struct zfcp_diag_adapter_config_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_config data;
} config_data;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3d0bc000f500..db320dab1fee 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,7 @@
#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_MAX_ERPS 3
@@ -768,10 +769,14 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
return ZFCP_ERP_FAILED;
+ return ZFCP_ERP_SUCCEEDED;
+}
+
+static void
+zfcp_erp_adapter_strategy_open_ptp_port(struct zfcp_adapter *const adapter)
+{
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
zfcp_erp_enqueue_ptp_port(adapter);
-
- return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
@@ -800,6 +805,59 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
return ZFCP_ERP_SUCCEEDED;
}
+static enum zfcp_erp_act_result
+zfcp_erp_adapter_strategy_alloc_shost(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter_config_data *const config_data =
+ &adapter->diagnostics->config_data;
+ struct zfcp_diag_adapter_port_data *const port_data =
+ &adapter->diagnostics->port_data;
+ unsigned long flags;
+ int rc;
+
+ rc = zfcp_scsi_adapter_register(adapter);
+ if (rc == -EEXIST)
+ return ZFCP_ERP_SUCCEEDED;
+ else if (rc)
+ return ZFCP_ERP_FAILED;
+
+ /*
+ * We allocated the shost for the first time. Before it was NULL,
+ * and so we deferred all updates in the xconf- and xport-data
+ * handlers. We need to make up for that now, and make all the updates
+ * that would have been done before.
+ *
+ * We can be sure that xconf- and xport-data succeeded, because
+ * otherwise this function is not called. But they might have been
+ * incomplete.
+ */
+
+ spin_lock_irqsave(&config_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_config_data(adapter, &config_data->data,
+ !!config_data->header.incomplete);
+ spin_unlock_irqrestore(&config_data->header.access_lock, flags);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ spin_lock_irqsave(&port_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_port_data(adapter, &port_data->data);
+ spin_unlock_irqrestore(&port_data->header.access_lock, flags);
+ }
+
+ /*
+ * There is a remote possibility that the 'Exchange Port Data' request
+ * reports a different connectivity status than 'Exchange Config Data'.
+ * But any change to the connectivity status of the local optic that
+ * happens after the initial xconf request is expected to be reported
+ * to us, as soon as we post Status Read Buffers to the FCP channel
+ * firmware after this function. So any resulting inconsistency will
+ * only be momentary.
+ */
+ if (config_data->header.incomplete)
+ zfcp_fsf_fc_host_link_down(adapter);
+
+ return ZFCP_ERP_SUCCEEDED;
+}
+
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
struct zfcp_erp_action *act)
{
@@ -809,6 +867,12 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
+ if (zfcp_erp_adapter_strategy_alloc_shost(act->adapter) ==
+ ZFCP_ERP_FAILED)
+ return ZFCP_ERP_FAILED;
+
+ zfcp_erp_adapter_strategy_open_ptp_port(act->adapter);
+
if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
@@ -1636,6 +1700,13 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_or(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+ * if `scsi_host` is missing, xconfig/xport data has never completed
+ * yet, so we can't access it, but there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host)
atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
@@ -1673,6 +1744,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+ * if `scsi_host` is missing, xconfig/xport data has never completed
+ * yet, so we can't access it, but there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) {
atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 88294ca0e2ea..3ef5d74331c3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -125,6 +125,7 @@ extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_port *);
+extern u32 zfcp_fsf_convert_portspeed(u32 fsf_speed);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
@@ -134,6 +135,7 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
@@ -153,6 +155,8 @@ extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *);
+extern void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
@@ -169,6 +173,13 @@ extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+extern void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete);
+extern void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom);
/* zfcp_sysfs.c */
extern const struct attribute_group *zfcp_unit_attr_groups[];
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 111fe3fc32d7..c795f22249d8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -120,21 +120,25 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
-static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost = adapter->scsi_host;
+ adapter->hydra_version = 0;
+ adapter->peer_wwpn = 0;
+ adapter->peer_wwnn = 0;
+ adapter->peer_d_id = 0;
+
+ /* if there is no shost yet, we have nothing to zero-out */
+ if (shost == NULL)
+ return;
+
fc_host_port_id(shost) = 0;
fc_host_fabric_name(shost) = 0;
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
- adapter->hydra_version = 0;
snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
-
- adapter->peer_wwpn = 0;
- adapter->peer_wwnn = 0;
- adapter->peer_d_id = 0;
}
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
@@ -479,7 +483,7 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
-static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
u32 fdmi_speed = 0;
if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
@@ -509,64 +513,36 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
struct zfcp_adapter *adapter = req->adapter;
- struct Scsi_Host *shost = adapter->scsi_host;
- struct fc_els_flogi *nsp, *plogi;
+ struct fc_els_flogi *plogi;
/* adjust pointers for missing command code */
- nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
- - sizeof(u32));
plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
- sizeof(u32));
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
- "IBM");
- fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
- fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
- fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-
adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
- zfcp_scsi_set_prot(adapter);
-
/* no error return above here, otherwise must fix call chains */
/* do not evaluate invalid fields */
if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
return 0;
- fc_host_port_id(shost) = ntoh24(bottom->s_id);
- fc_host_speed(shost) =
- zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
-
adapter->hydra_version = bottom->adapter_type;
- snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
- bottom->adapter_type);
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
adapter->peer_d_id = ntoh24(bottom->peer_d_id);
adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
- fc_host_port_type(shost) = FC_PORTTYPE_PTP;
- fc_host_fabric_name(shost) = 0;
break;
case FSF_TOPO_FABRIC:
- fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
- if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
- else
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
- fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
- fc_host_fabric_name(shost) = 0;
- fallthrough;
default:
- fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
@@ -584,13 +560,10 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
- snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->lic_version);
adapter->fsf_lic_version = bottom->lic_version;
adapter->adapter_features = bottom->adapter_features;
adapter->connection_features = bottom->connection_features;
@@ -606,6 +579,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_config_data(adapter, bottom, false);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -630,6 +604,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_config_data(adapter, bottom, true);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
break;
@@ -638,16 +614,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
return;
}
- if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
adapter->hardware_version = bottom->hardware_version;
- snprintf(fc_host_hardware_version(shost),
- FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->hardware_version);
- memcpy(fc_host_serial_number(shost), bottom->serial_number,
- min(FC_SERIAL_NUMBER_SIZE, 17));
- EBCASC(fc_host_serial_number(shost),
- min(FC_SERIAL_NUMBER_SIZE, 17));
- }
if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
@@ -761,19 +729,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
- fc_host_supported_speeds(shost) =
- zfcp_fsf_convert_portspeed(bottom->supported_speed);
- memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
- FC_FC4_LIST_SIZE);
- memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
- FC_FC4_LIST_SIZE);
if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
adapter->fc_security_algorithms =
bottom->fc_security_algorithms;
@@ -800,6 +759,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -808,6 +768,8 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 26702b56a7ab..3a7f3374d10a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -342,6 +342,18 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, 0);
}
+void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
+ shost->sg_tablesize = qdio->max_sbale_per_req;
+ shost->max_sectors = qdio->max_sbale_per_req * 8;
+}
+
/**
* zfcp_qdio_open - prepare and initialize response queue
* @qdio: pointer to struct zfcp_qdio
@@ -420,10 +432,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
- if (adapter->scsi_host) {
- adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
- adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
- }
+ zfcp_qdio_shost_update(adapter, qdio);
return 0;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 13d873f806e4..d58bf79892f2 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -451,26 +451,39 @@ static struct scsi_host_template zfcp_scsi_host_template = {
};
/**
- * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * zfcp_scsi_adapter_register() - Allocate and register SCSI and FC host with
+ * SCSI midlayer
* @adapter: The zfcp adapter to register with the SCSI midlayer
+ *
+ * Allocates the SCSI host object for the given adapter, sets basic properties
+ * (such as the transport template, QDIO limits, ...), and registers it with
+ * the midlayer.
+ *
+ * During registration with the midlayer the corresponding FC host object for
+ * the referenced transport class is also implicitly allocated.
+ *
+ * Upon success adapter->scsi_host is set, and upon failure it remains NULL. If
+ * adapter->scsi_host is already set, nothing is done.
+ *
+ * Return:
+ * * 0 - Allocation and registration was successful
+ * * -EEXIST - SCSI and FC host did already exist, nothing was done, nothing
+ * was changed
+ * * -EIO - Allocation or registration failed
*/
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
if (adapter->scsi_host)
- return 0;
+ return -EEXIST;
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
- if (!adapter->scsi_host) {
- dev_err(&adapter->ccw_device->dev,
- "Registering the FCP device with the "
- "SCSI stack failed\n");
- return -EIO;
- }
+ if (!adapter->scsi_host)
+ goto err_out;
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
@@ -480,14 +493,23 @@ int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
+ /* make all basic properties known at registration time */
+ zfcp_qdio_shost_update(adapter, adapter->qdio);
+ zfcp_scsi_set_prot(adapter);
+
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
scsi_host_put(adapter->scsi_host);
- return -EIO;
+ goto err_out;
}
return 0;
+err_out:
+ adapter->scsi_host = NULL;
+ dev_err(&adapter->ccw_device->dev,
+ "Registering the FCP device with the SCSI stack failed\n");
+ return -EIO;
}
/**
@@ -841,6 +863,95 @@ void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
set_host_byte(scmd, DID_SOFT_ERROR);
}
+void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+ const struct fc_els_flogi *nsp, *plogi;
+
+ if (shost == NULL)
+ return;
+
+ snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->lic_version);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ snprintf(fc_host_hardware_version(shost),
+ FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->hardware_version);
+ memcpy(fc_host_serial_number(shost), bottom->serial_number,
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ EBCASC(fc_host_serial_number(shost),
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ }
+
+ /* adjust pointers for missing command code */
+ nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+ - sizeof(u32));
+ plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+ - sizeof(u32));
+
+ snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+ "IBM");
+ fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+ fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ zfcp_scsi_set_prot(adapter);
+
+ /* do not evaluate invalid fields */
+ if (bottom_incomplete)
+ return;
+
+ fc_host_port_id(shost) = ntoh24(bottom->s_id);
+ fc_host_speed(shost) =
+ zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+ bottom->adapter_type);
+
+ switch (bottom->fc_topology) {
+ case FSF_TOPO_P2P:
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ fc_host_fabric_name(shost) = 0;
+ break;
+ case FSF_TOPO_FABRIC:
+ fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ break;
+ case FSF_TOPO_AL:
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ fallthrough;
+ default:
+ fc_host_fabric_name(shost) = 0;
+ break;
+ }
+}
+
+void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+ fc_host_supported_speeds(shost) =
+ zfcp_fsf_convert_portspeed(bottom->supported_speed);
+ memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+ FC_FC4_LIST_SIZE);
+ memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+ FC_FC4_LIST_SIZE);
+}
+
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
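Note the FSF_TOPO_AL case in zfcp_scsi_shost_update_config_data() above uses the kernel's fallthrough pseudo-keyword (from <linux/compiler_attributes.h>, expanding to __attribute__((__fallthrough__)) where supported) so that -Wimplicit-fallthrough accepts the intentional sharing of the default branch. A generic sketch of the idiom, with hypothetical helpers:

	switch (topology) {
	case KNOWN_LOOP:
		mark_loop_port();	/* loop-specific step first... */
		fallthrough;		/* ...then the branch shared below */
	default:
		clear_fabric_name();	/* common to loop and unknown cases */
		break;
	}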
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 7ec30ded0169..8d9662e8b717 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -216,20 +216,32 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ int retval = 0;
if (!adapter)
return -ENODEV;
/*
+ * If `scsi_host` is missing, we can't schedule `scan_work`, as it
+ * makes use of the corresponding fc_host object. But this state is
+ * only possible if xconfig/xport data has never completed yet,
+ * and we couldn't successfully scan for ports anyway.
+ */
+ if (adapter->scsi_host == NULL) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /*
* The user's wish is our command: immediately schedule and flush a
* worker to conduct a synchronous port scan, that is, neither
* a random delay nor a rate limit is applied here.
*/
queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
flush_delayed_work(&adapter->scan_work);
+out:
zfcp_ccw_adapter_put(adapter);
-
- return (ssize_t) count;
+ return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index b5b3154e2c28..bb49d83cadc7 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2237,7 +2237,7 @@ static bool __init blogic_inquiry(struct blogic_adapter *adapter)
"INQUIRE INSTALLED DEVICES ID 0 TO 7");
for (tgt_id = 0; tgt_id < 8; tgt_id++)
adapter->tgt_flags[tgt_id].tgt_exists =
- (installed_devs0to7[tgt_id] != 0 ? true : false);
+ installed_devs0to7[tgt_id] != 0;
}
/*
Issue the Inquire Setup Information command.
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index eb72ac8136c3..2b868f8db8ff 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -814,7 +814,6 @@ int aac_probe_container(struct aac_dev *dev, int cid)
kfree(scsidev);
return -ENOMEM;
}
- scsicmd->list.next = NULL;
scsicmd->scsi_done = aac_probe_container_scsi_done;
scsicmd->device = scsidev;
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index ffe41bc111fc..34e65dea992e 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -513,15 +513,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
- if (!user_srbcmd) {
- dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
- rcode = -ENOMEM;
- goto cleanup;
- }
- if(copy_from_user(user_srbcmd, user_srb,fibsize)){
- dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
- rcode = -EFAULT;
+ user_srbcmd = memdup_user(user_srb, fibsize);
+ if (IS_ERR(user_srbcmd)) {
+ rcode = PTR_ERR(user_srbcmd);
+ user_srbcmd = NULL;
goto cleanup;
}
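The conversion above folds the kmalloc()/copy_from_user() pair into memdup_user(), which reports failure as an ERR_PTR-encoded -ENOMEM or -EFAULT. A minimal sketch of the idiom (the surrounding handler is hypothetical):

	static int example_copy_in(void __user *uptr, size_t len)
	{
		void *buf = memdup_user(uptr, len);

		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

		/* ... consume buf ... */
		kfree(buf);
		return 0;
	}

Note how the hunk also resets user_srbcmd to NULL after taking PTR_ERR(), so the shared cleanup label can kfree() it unconditionally.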
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ddd73f6798af..8ee4e1abe568 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2351,7 +2351,7 @@ fib_free_out:
goto out;
}
-int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
struct tm cur_tm;
char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
@@ -2380,7 +2380,7 @@ out:
return ret;
}
-int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
int ret = -ENOMEM;
struct fib *fibptr;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 83a60b0a8cd8..a308e86a97f1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -864,7 +864,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
return HBA_IU_TYPE_SATA_REQ;
}
-void aac_tmf_callback(void *context, struct fib *fibptr)
+static void aac_tmf_callback(void *context, struct fib *fibptr)
{
struct aac_hba_resp *err =
&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
@@ -1078,7 +1078,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
* @scsi_cmd: SCSI command block causing the reset
*
*/
-int aac_eh_host_reset(struct scsi_cmnd *cmd)
+static int aac_eh_host_reset(struct scsi_cmnd *cmd)
{
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
@@ -1632,7 +1632,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost;
struct aac_dev *aac;
struct list_head *insert = &aac_devices;
- int error = -ENODEV;
+ int error;
int unique_id = 0;
u64 dmamask;
int mask_bits = 0;
@@ -1657,7 +1657,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = pci_enable_device(pdev);
if (error)
goto out;
- error = -ENODEV;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -1689,8 +1688,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
- if (!shost)
+ if (!shost) {
+ error = -ENOMEM;
goto out_disable_pdev;
+ }
shost->irq = pdev->irq;
shost->unique_id = unique_id;
@@ -1714,8 +1715,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
sizeof(struct fib),
GFP_KERNEL);
- if (!aac->fibs)
+ if (!aac->fibs) {
+ error = -ENOMEM;
goto out_free_host;
+ }
+
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a336a458c978..e4a09b93d00c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3662,8 +3662,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
return;
tstate = ahd->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahd->enabled_targets[scsi_id] = NULL;
}
#endif
@@ -6054,14 +6053,13 @@ ahd_alloc(void *platform_arg, char *name)
{
struct ahd_softc *ahd;
- ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
+ ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC);
if (!ahd) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
- memset(ahd, 0, sizeof(*ahd));
ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
if (ahd->seep_config == NULL) {
kfree(ahd);
@@ -6120,8 +6118,7 @@ ahd_set_unit(struct ahd_softc *ahd, int unit)
void
ahd_set_name(struct ahd_softc *ahd, char *name)
{
- if (ahd->name != NULL)
- kfree(ahd->name);
+ kfree(ahd->name);
ahd->name = name;
}
@@ -6182,12 +6179,9 @@ ahd_free(struct ahd_softc *ahd)
kfree(ahd->black_hole);
}
#endif
- if (ahd->name != NULL)
- kfree(ahd->name);
- if (ahd->seep_config != NULL)
- kfree(ahd->seep_config);
- if (ahd->saved_stack != NULL)
- kfree(ahd->saved_stack);
+ kfree(ahd->name);
+ kfree(ahd->seep_config);
+ kfree(ahd->saved_stack);
kfree(ahd);
return;
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 84fc499cb1e6..3d4df906fa4f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2178,8 +2178,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
if (channel == 'B')
scsi_id += 8;
tstate = ahc->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahc->enabled_targets[scsi_id] = NULL;
}
#endif
@@ -4384,13 +4383,13 @@ ahc_alloc(void *platform_arg, char *name)
struct ahc_softc *ahc;
int i;
- ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
+ ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC);
if (!ahc) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
- memset(ahc, 0, sizeof(*ahc));
+
ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
if (ahc->seep_config == NULL) {
kfree(ahc);
@@ -4453,8 +4452,7 @@ ahc_set_unit(struct ahc_softc *ahc, int unit)
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
- if (ahc->name != NULL)
- kfree(ahc->name);
+ kfree(ahc->name);
ahc->name = name;
}
@@ -4515,10 +4513,8 @@ ahc_free(struct ahc_softc *ahc)
kfree(ahc->black_hole);
}
#endif
- if (ahc->name != NULL)
- kfree(ahc->name);
- if (ahc->seep_config != NULL)
- kfree(ahc->seep_config);
+ kfree(ahc->name);
+ kfree(ahc->seep_config);
kfree(ahc);
return;
}
@@ -4927,8 +4923,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
case 0:
break;
}
- if (scb_data->scbarray != NULL)
- kfree(scb_data->scbarray);
+ kfree(scb_data->scbarray);
}
static void
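The aic79xx and aic7xxx hunks apply the same two cleanups: kzalloc() replaces the kmalloc()+memset() pair, and the guards before kfree() go away because kfree(NULL) is a documented no-op. In sketch form (the alloc/free split is illustrative):

	static struct ahc_softc *example_alloc(void)
	{
		/* zeroed in one call; was kmalloc() + memset(..., 0, ...) */
		return kzalloc(sizeof(struct ahc_softc), GFP_ATOMIC);
	}

	static void example_free(struct ahc_softc *ahc)
	{
		kfree(ahc->name);		/* kfree(NULL) is a no-op... */
		kfree(ahc->seep_config);	/* ...so no NULL checks needed */
		kfree(ahc);
	}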
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 3ddc8852bc32..105adba559a1 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -406,7 +406,7 @@ struct asd_manuf_sec {
u8 sas_addr[SAS_ADDR_SIZE];
u8 pcba_sn[ASD_PCBA_SN_SIZE];
/* Here start the other segments */
- u8 linked_list[0];
+ u8 linked_list[];
} __attribute__ ((packed));
struct asd_manuf_phy_desc {
@@ -449,7 +449,7 @@ struct asd_ms_sb_desc {
u8 type;
u8 node_desc_index;
u8 conn_desc_index;
- u8 _recvd[0];
+ u8 _recvd[];
} __attribute__ ((packed));
#if 0
@@ -478,12 +478,12 @@ struct asd_ms_conn_desc {
u8 size_sideband_desc;
u32 _resvd;
u8 name[16];
- struct asd_ms_sb_desc sb_desc[0];
+ struct asd_ms_sb_desc sb_desc[];
} __attribute__ ((packed));
struct asd_nd_phy_desc {
u8 vp_attch_type;
- u8 attch_specific[0];
+ u8 attch_specific[];
} __attribute__ ((packed));
#if 0
@@ -503,7 +503,7 @@ struct asd_ms_node_desc {
u8 size_phy_desc;
u8 _resvd;
u8 name[16];
- struct asd_nd_phy_desc phy_desc[0];
+ struct asd_nd_phy_desc phy_desc[];
} __attribute__ ((packed));
struct asd_ms_conn_map {
@@ -518,7 +518,7 @@ struct asd_ms_conn_map {
u8 usage_model_id;
u32 _resvd;
struct asd_ms_conn_desc conn_desc[0];
- struct asd_ms_node_desc node_desc[0];
+ struct asd_ms_node_desc node_desc[];
} __attribute__ ((packed));
struct asd_ctrla_phy_entry {
@@ -542,7 +542,7 @@ struct asd_ll_el {
u8 id0;
u8 id1;
__le16 next;
- u8 something_here[0];
+ u8 something_here[];
} __attribute__ ((packed));
static int asd_poll_flash(struct asd_ha_struct *asd_ha)
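The [0] arrays above are the pre-C99 GNU zero-length idiom; true flexible array members ([]) let the compiler and fortified string/memory helpers reason about the trailing-data bounds. (asd_ms_conn_map keeps conn_desc[0]: only the last member of a struct may be flexible.) Such structs are conventionally sized with struct_size() from <linux/overflow.h>; a sketch with a hypothetical descriptor:

	struct example_desc {
		u8 id;
		u8 count;
		u8 payload[];		/* was: payload[0] */
	} __attribute__ ((packed));

	static struct example_desc *example_alloc(u8 n)
	{
		struct example_desc *d;

		d = kzalloc(struct_size(d, payload, n), GFP_KERNEL);
		if (d)
			d->count = n;
		return d;
	}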
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0f554ebb8f2c..fb4c469bd89f 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -708,7 +708,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
}
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
struct bfi_msg_s *m;
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 284baa3b0c8e..766f2b5ed2ab 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -436,7 +436,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
return BFA_STATUS_OK;
}
-void
+static void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
struct bfa_itnim_latency_s *io_lat =
@@ -453,7 +453,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
io_lat->avg[idx] += val;
}
-void
+static void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
ioim->start_time = jiffies;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 7c3eadc58b98..297a77f5806c 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1283,7 +1283,7 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
n2n_port->reply_oxid = 0;
}
-void
+static void
bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
{
int i = 0, j = 0, bit = 0, alpa_bit = 0;
@@ -4358,7 +4358,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_gid_ft);
bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
- };
+ }
break;
default:
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 82801b366500..fc294e1950a6 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -1575,7 +1575,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
- };
+ }
break;
case RPSM_EVENT_DELETE:
@@ -2449,7 +2449,7 @@ bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
bfa_fcs_itnim_brp_online(rport->itnim);
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_online(rport);
- };
+ }
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 18b58b2f304f..6fd3383ee538 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -364,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
writel(r32, rb + FNC_PERS_REG);
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
u32 r32;
@@ -744,7 +744,7 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
writel(0, (rb + CT2_MBIST_CTL_REG));
}
-void
+static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
/* put port0, port1 MAC & AHB in reset */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 6d2131441f0a..0b7d2e8f4a66 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -4284,7 +4284,7 @@ bfa_fcport_dportdisable(struct bfa_s *bfa)
bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
}
-void
+static void
bfa_fcport_ddportenable(struct bfa_s *bfa)
{
/*
@@ -4293,7 +4293,7 @@ bfa_fcport_ddportenable(struct bfa_s *bfa)
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
}
-void
+static void
bfa_fcport_ddportdisable(struct bfa_s *bfa)
{
/*
@@ -5517,7 +5517,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
struct bfa_uf_buf_s *uf_buf;
uint8_t *buf;
- struct fchs_s *fchs;
uf_buf = (struct bfa_uf_buf_s *)
bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
@@ -5526,8 +5525,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
m->frm_len = be16_to_cpu(m->frm_len);
m->xfr_len = be16_to_cpu(m->xfr_len);
- fchs = (struct fchs_s *)uf_buf;
-
list_del(&uf->qe); /* dequeue from posted queue */
uf->data_ptr = buf;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index eb0c76338295..bc5d84f87d8f 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -50,7 +50,7 @@ int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;
int max_xfer_size = BFAD_MAX_SECTORS >> 1;
-int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
+static int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
/* Firmware related */
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index fbfce02e5b93..5ae1e3f78910 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -437,7 +437,7 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
return status;
}
-int
+static int
bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
@@ -562,7 +562,7 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
-void
+static void
bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
{
struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a76c968dbac5..412dbe125e10 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -136,7 +136,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 1cbb431fa682..0e33324e16f5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -945,7 +945,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
*/
if (interface->enabled)
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -965,7 +965,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
put_cpu();
fcoe_clean_pending_queue(lport);
wait_for_upload = 1;
- };
+ }
}
}
mutex_unlock(&bnx2fc_dev_lock);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 2b070f0835df..1aba5897ccb0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1081,6 +1081,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
}
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+ __must_hold(&tgt->tgt_lock)
{
struct bnx2fc_rport *tgt = io_req->tgt;
unsigned int time_left;
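__must_hold() is a sparse annotation from <linux/compiler_types.h>: it documents, and lets sparse verify, that bnx2fc_abts_cleanup() is entered and left with tgt->tgt_lock held; without sparse it compiles to nothing. A generic sketch of the locking annotations (struct ctx is hypothetical):

	struct ctx {
		spinlock_t lock;
		int refs;
	};

	/* caller holds c->lock across the whole call */
	static void ctx_get(struct ctx *c)
		__must_hold(&c->lock)
	{
		c->refs++;
	}

	/* paths that take or drop the lock use __acquires()/__releases() */
	static void ctx_lock(struct ctx *c)
		__acquires(&c->lock)
	{
		spin_lock(&c->lock);
	}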
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 524cdbcd29aa..ec7d01f6e2d5 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
struct sk_buff *skb = NULL;
+ int ret;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
@@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
if (csk->atid < 0) {
pr_err("NO atid available.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_sock;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
if (!skb) {
- cxgb3_free_atid(t3dev, csk->atid);
- cxgbi_sock_put(csk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_atid;
}
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
@@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
return 0;
+
+free_atid:
+ cxgb3_free_atid(t3dev, csk->atid);
+put_sock:
+ cxgbi_sock_put(csk);
+ l2t_release(t3dev, csk->l2t);
+ csk->l2t = NULL;
+
+ return ret;
}
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
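The init_act_open() rework replaces open-coded error handling with the standard kernel unwind ladder, and on failure now also drops the l2t reference taken earlier in the function: acquire resources in order, release them in reverse under labels, and return through a single ret. The shape, with hypothetical helpers:

	static int example_open(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto err_release_a;

		ret = acquire_c();
		if (ret)
			goto err_release_b;

		return 0;

	err_release_b:
		release_b();
	err_release_a:
		release_a();
		return ret;
	}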
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index bc1086ae6835..8ce8592f6a64 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,10 +1127,9 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
if (!csk)
goto rel_skb;
- if (csk)
- pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
- (&csk->saddr), (&csk->daddr), csk,
- csk->state, csk->flags, csk->tid, rpl->status);
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+ (&csk->saddr), (&csk->daddr), csk,
+ csk->state, csk->flags, csk->tid, rpl->status);
if (rpl->status == CPL_ERR_ABORT_FAILED)
goto rel_skb;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index fbd2ae40dab4..fcc5aa9f6014 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3744,6 +3744,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->afu_cookie = cfg->ops->create_afu(pdev);
if (unlikely(!cfg->afu_cookie)) {
dev_err(dev, "%s: create_afu failed\n", __func__);
+ rc = -ENOMEM;
goto out_remove;
}
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 02dff3a684e0..2cf889512ece 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1120,7 +1120,7 @@ static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u6
{
struct adpt_device* d;
- if(chan < 0 || chan >= MAX_CHANNEL)
+ if (chan >= MAX_CHANNEL)
return NULL;
d = pHba->channel[chan].device[id];
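chan is a u32 in adpt_find_device(), so the deleted `chan < 0' test could never be true (it only drew type-limits warnings); for an unsigned index the upper-bound check is complete on its own. Illustration:

	static bool chan_valid(u32 chan)
	{
		return chan < MAX_CHANNEL;	/* `chan < 0' is impossible for u32 */
	}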
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 25dae9f0b205..cb41d166e0c0 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1915,7 +1915,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case FCOE_CTLR_ENABLED:
case FCOE_CTLR_UNUSED:
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -1927,7 +1927,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
stats->LinkFailureCount++;
put_cpu();
fcoe_clean_pending_queue(lport);
- };
+ }
}
out:
return rc;
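The `};' closers deleted here (and in the bnx2fc hunks above) ended switch statements with a stray null statement; it is harmless C but trips warnings such as clang's -Wextra-semi-stmt. Illustration with a hypothetical handler:

	switch (cdev->enabled) {
	case FCOE_CTLR_ENABLED:
		handle_link_up();
		break;
	};	/* <- the trailing ';' is an empty statement; the patch drops it */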
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 18584ab27c32..7910b573bacb 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -49,8 +49,8 @@
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
-LIST_HEAD(fnic_list);
-DEFINE_SPINLOCK(fnic_list_lock);
+static LIST_HEAD(fnic_list);
+static DEFINE_SPINLOCK(fnic_list_lock);
/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index b60795893994..27535c90b248 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2624,8 +2624,8 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->internal_reset_inprogress == 0) {
- fnic->internal_reset_inprogress = 1;
+ if (!fnic->internal_reset_inprogress) {
+ fnic->internal_reset_inprogress = true;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2654,7 +2654,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->internal_reset_inprogress = 0;
+ fnic->internal_reset_inprogress = false;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 1b88a3b53eee..a2beee6e09f0 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -254,7 +254,7 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
}
}
-int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
+static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
@@ -316,7 +316,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
return -ETIMEDOUT;
}
-int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
@@ -411,7 +411,7 @@ int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
-int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
@@ -422,7 +422,7 @@ int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
}
-int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
int err;
unsigned int fetch_index;
@@ -492,7 +492,7 @@ err_free_devcmd2:
}
-void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
vnic_wq_disable(&vdev->devcmd2->wq);
@@ -503,7 +503,7 @@ void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
}
-int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
int err;
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index 015af2cdabaf..442972c04e65 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -25,7 +25,7 @@
#include "vnic_wq.h"
-int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int index, enum vnic_res_type res_type)
{
wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
@@ -37,7 +37,7 @@ int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
}
-int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index fe03410268e6..7f150d52b4a6 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -329,8 +329,8 @@ static void gdth_scsi_done(struct scsi_cmnd *scp)
scp->scsi_done(scp);
}
-int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info)
+static int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd,
+ char *cmnd, int timeout, u32 *info)
{
gdth_ha_str *ha = shost_priv(sdev->host);
struct scsi_cmnd *scp;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 9a6deb21fe4d..11caa4b0d797 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -898,8 +898,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ if (phy->phy_attached)
+ return;
+
if (!timer_pending(&phy->timer)) {
- dev_dbg(dev, "phy%d OOB ready\n", phy_no);
phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
add_timer(&phy->timer);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index fa25766502a2..2e1718f9ade2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1175,15 +1175,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
-static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot)
+static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
- enum exec_status sts;
struct hisi_sas_complete_v1_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v1_hdr *complete_hdr;
@@ -1194,7 +1193,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -1260,7 +1259,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
slot_err_v1_hw(hisi_hba, task, slot);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -1309,12 +1308,9 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
out:
hisi_sas_slot_task_free(hisi_hba, task, slot);
- sts = ts->stat;
if (task->task_done)
task->task_done(task);
-
- return sts;
}
/* Interrupts */
@@ -1757,6 +1753,7 @@ static struct device_attribute *host_attrs_v1_hw[] = {
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e05faf315dcd..e7e7849a4c14 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2318,8 +2318,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
}
-static int
-slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2327,7 +2327,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v2_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v2_hdr *complete_hdr =
@@ -2337,7 +2336,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2406,7 +2405,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
error_info[2], error_info[3]);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -2456,12 +2455,11 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2473,15 +2471,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
@@ -3533,6 +3529,7 @@ static struct device_attribute *host_attrs_v2_hw[] = {
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 374885aa8d77..3e6b78a1f993 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -912,11 +912,15 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
return -EINVAL;
}
- /* Switch over to MSI handling , from PCI AER default */
+ /*
+ * This DSM handles some hardware-related configurations:
+ * 1. Switch over to MSI error handling in kernel
+ * 2. BIOS *may* reset some register values through this method
+ */
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
DSM_FUNC_ERR_HANDLE_MSI, NULL);
if (!obj)
- dev_warn(dev, "Switch over to MSI handling failed\n");
+ dev_warn(dev, "can not find DSM method, ignore\n");
else
ACPI_FREE(obj);
@@ -2152,8 +2156,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
}
}
-static int
-slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2161,7 +2165,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v3_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v3_hdr *complete_hdr =
@@ -2171,7 +2174,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0, dw1, dw3;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2233,7 +2236,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -2278,12 +2281,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2295,15 +2297,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n ",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
@@ -2897,6 +2897,7 @@ static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
};
static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
@@ -3071,6 +3072,7 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 59f0f1030c54..44e64aa21194 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2384,7 +2384,7 @@ static struct vio_driver ibmvscsi_driver = {
static struct srp_function_template ibmvscsi_transport_functions = {
};
-int __init ibmvscsi_module_init(void)
+static int __init ibmvscsi_module_init(void)
{
int ret;
@@ -2406,7 +2406,7 @@ int __init ibmvscsi_module_init(void)
return ret;
}
-void __exit ibmvscsi_module_exit(void)
+static void __exit ibmvscsi_module_exit(void)
{
vio_unregister_driver(&ibmvscsi_driver);
srp_release_transport(ibmvscsi_transport_template);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d48a8fa997b9..7d77997d26d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1164,7 +1164,7 @@ static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int pr
default:
res->ata_class = ATA_DEV_UNKNOWN;
break;
- };
+ }
}
/**
@@ -9529,8 +9529,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
}
- if (ioa_cfg->ipr_cmd_pool)
- dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
+ dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
kfree(ioa_cfg->ipr_cmnd_list);
kfree(ioa_cfg->ipr_cmnd_list_dma);
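
A note on the ipr hunk above: dma_pool_destroy() is NULL-safe in the same way kfree() is, so the removed guard was redundant. A minimal kernel-style sketch of the resulting teardown pattern (the structure and field names are invented for illustration):

#include <linux/dmapool.h>

struct example_cfg {
	struct dma_pool *cmd_pool;
};

/* dma_pool_destroy(), like kfree(), returns immediately on NULL, so
 * teardown paths need no "if (pool)" guard around it. */
static void example_free_pools(struct example_cfg *cfg)
{
	dma_pool_destroy(cfg->cmd_pool);  /* safe even if allocation failed */
	cfg->cmd_pool = NULL;             /* avoid a stale pointer on re-init */
}
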
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 680e30947671..4e6b1decbca7 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -500,19 +500,19 @@ struct sci_timer {
static inline
void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t))
{
- tmr->cancel = 0;
+ tmr->cancel = false;
timer_setup(&tmr->timer, fn, 0);
}
static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
{
- tmr->cancel = 0;
+ tmr->cancel = false;
mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
}
static inline void sci_del_timer(struct sci_timer *tmr)
{
- tmr->cancel = 1;
+ tmr->cancel = true;
del_timer(&tmr->timer);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 874dd4beed10..e5a64d4f255c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2627,7 +2627,9 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
if (xmit_can_sleep) {
snprintf(ihost->workq_name, sizeof(ihost->workq_name),
"iscsi_q_%d", shost->host_no);
- ihost->workq = create_singlethread_workqueue(ihost->workq_name);
+ ihost->workq = alloc_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 2, ihost->workq_name);
if (!ihost->workq)
goto free_host;
}
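
For context on the libiscsi hunk: create_singlethread_workqueue() is a legacy wrapper that is roughly an ordered alloc_workqueue() with WQ_MEM_RECLAIM, so the change keeps the reclaim guarantee while raising max_active to 2 and exposing the queue in sysfs via WQ_SYSFS. A hedged sketch contrasting the two forms (the helper name is invented):

#include <linux/workqueue.h>

/* Sketch only: contrast the legacy helper with the explicit
 * alloc_workqueue() call used in the hunk above. */
static struct workqueue_struct *example_make_wq(const char *name)
{
	/* Legacy form: ordered (one work item at a time), not in sysfs:
	 *   return create_singlethread_workqueue(name);
	 */

	/* Explicit form: up to 2 concurrent work items, visible under
	 * /sys/devices/virtual/workqueue/, still usable on the memory
	 * reclaim path because of WQ_MEM_RECLAIM. */
	return alloc_workqueue("%s",
			       WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM |
			       WQ_UNBOUND, 2, name);
}
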
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index c5a828a041e0..5d716d388707 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -160,6 +160,7 @@ qc_already_gone:
}
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ __must_hold(ap->lock)
{
struct sas_task *task;
struct scatterlist *sg;
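
The __must_hold() annotation added above is for sparse: it declares that sas_ata_qc_issue() is entered and exited with ap->lock held, so the lock juggling inside the function does not trigger context-imbalance warnings. A minimal sketch of the idiom, assuming a caller-held spinlock:

#include <linux/spinlock.h>

/* Sketch: a helper documented (and sparse-checked via __must_hold)
 * as running with "lock" already held by its caller. It may drop
 * and re-take the lock without tripping context tracking. */
static int example_issue(spinlock_t *lock, int arg)
	__must_hold(lock)
{
	spin_unlock(lock);   /* drop briefly, e.g. around a sleeping call */
	/* ... work that must not hold the lock would go here ... */
	spin_lock(lock);     /* re-acquire before returning to the caller */
	return arg;
}
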
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 62e96d4fdcc6..c3ceb6e5b061 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -627,6 +627,19 @@ struct lpfc_ras_fwlog {
enum ras_state state; /* RAS logging running state */
};
+enum lpfc_irq_chann_mode {
+ /* Assign IRQs to all possible cpus that have hardware queues */
+ NORMAL_MODE,
+
+ /* Assign IRQs only to cpus on the same numa node as HBA */
+ NUMA_MODE,
+
+ /* Assign IRQs only on non-hyperthreaded CPUs. This is the
+ * same as NORMAL_MODE, but assigns IRQs only on physical CPUs.
+ */
+ NHT_MODE,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -835,7 +848,6 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
- uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -1003,6 +1015,7 @@ struct lpfc_hba {
mempool_t *active_rrq_pool;
struct fc_host_statistics link_stats;
+ enum lpfc_irq_chann_mode irq_chann_mode;
enum intr_type_t intr_type;
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
@@ -1314,19 +1327,19 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
- * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
- * @numa_mask: Pointer to phba's numa_mask member.
+ * lpfc_next_online_cpu - Finds next online CPU on cpumask
+ * @mask: Pointer to phba's cpumask member.
* @start: starting cpu index
*
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
static inline unsigned int
-lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
unsigned int cpu_it;
- for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ for_each_cpu_wrap(cpu_it, mask, start) {
if (cpu_online(cpu_it))
break;
}
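
The renamed helper is now mask-agnostic: it walks whatever cpumask it is given starting at @start, wraps past the end, and returns the first online CPU, or nr_cpu_ids if none qualifies. A small userspace model of that wrap-and-test search (the in_mask[] and online[] tables stand in for the cpumask and cpu_online()):

#include <stdio.h>

#define NR_CPUS 8

/* Toy stand-ins for cpumask_test_cpu() and cpu_online(). */
static const int in_mask[NR_CPUS] = { 0, 1, 1, 0, 1, 1, 0, 0 };
static const int online[NR_CPUS]  = { 1, 0, 1, 1, 1, 0, 1, 1 };

/* Model of lpfc_next_online_cpu(): scan the mask from "start",
 * wrapping around, and return the first online CPU in it;
 * NR_CPUS (the analogue of nr_cpu_ids) means none was found. */
static unsigned int next_online_cpu(unsigned int start)
{
	for (unsigned int i = 0; i < NR_CPUS; i++) {
		unsigned int cpu = (start + i) % NR_CPUS;

		if (in_mask[cpu] && online[cpu])
			return cpu;
	}
	return NR_CPUS;
}

int main(void)
{
	printf("from 4: %u\n", next_online_cpu(4)); /* 4: in mask, online */
	printf("from 5: %u\n", next_online_cpu(5)); /* 2: found by wrapping */
	return 0;
}
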
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f089867674cb..a62c60ca6477 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4874,7 +4874,7 @@ lpfc_request_firmware_upgrade_store(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- int val = 0, rc = -EINVAL;
+ int val = 0, rc;
/* Sanity check on user data */
if (!isdigit(buf[0]))
@@ -5701,17 +5701,69 @@ LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
-static inline void
-lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+#if IS_ENABLED(CONFIG_X86)
+/**
+ * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
+ * irq_chann_mode
+ * @phba: Pointer to HBA context object.
+ **/
+static void
+lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
+ const struct cpumask *sibling_mask;
+ struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ cpumask_clear(aff_mask);
+
+ if (phba->irq_chann_mode == NUMA_MODE) {
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE) {
+ phba->irq_chann_mode = NORMAL_MODE;
+ return;
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ switch (phba->irq_chann_mode) {
+ case NUMA_MODE:
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, aff_mask);
+ break;
+ case NHT_MODE:
+ sibling_mask = topology_sibling_cpumask(cpu);
+ first_cpu = cpumask_first(sibling_mask);
+ if (first_cpu < nr_cpu_ids)
+ cpumask_set_cpu(first_cpu, aff_mask);
+ break;
+ default:
+ break;
+ }
+ }
+}
+#endif
+
+static void
+lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
{
#if IS_ENABLED(CONFIG_X86)
- /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- phba->cfg_irq_numa = 1;
- else
- phba->cfg_irq_numa = 0;
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ /* If AMD architecture, then default is NUMA_MODE */
+ phba->irq_chann_mode = NUMA_MODE;
+ break;
+ case X86_VENDOR_INTEL:
+ /* If Intel architecture, then default is no hyperthread mode */
+ phba->irq_chann_mode = NHT_MODE;
+ break;
+ default:
+ phba->irq_chann_mode = NORMAL_MODE;
+ break;
+ }
+ lpfc_cpumask_irq_mode_init(phba);
#else
- phba->cfg_irq_numa = 0;
+ phba->irq_chann_mode = NORMAL_MODE;
#endif
}
@@ -5723,6 +5775,7 @@ lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
*
* 0 = Configure number of IRQ Channels to:
* if AMD architecture, number of CPUs on HBA's NUMA node
+ * if Intel architecture, number of physical CPUs.
* otherwise, number of active CPUs.
* [1,256] = Manually specify how many IRQ Channels to use.
*
@@ -5748,35 +5801,44 @@ MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
static int
lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
{
- const struct cpumask *numa_mask;
+ const struct cpumask *aff_mask;
if (phba->cfg_use_msi != 2) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8532 use_msi = %u ignoring cfg_irq_numa\n",
phba->cfg_use_msi);
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return 0;
}
/* Check if default setting was passed */
if (val == LPFC_IRQ_CHANN_DEF)
- lpfc_assign_default_irq_numa(phba);
+ lpfc_assign_default_irq_chann(phba);
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
+ if (phba->irq_chann_mode != NORMAL_MODE) {
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
- if (cpumask_empty(numa_mask)) {
+ if (cpumask_empty(aff_mask)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "8533 Could not identify NUMA node, "
- "ignoring cfg_irq_numa\n");
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ "8533 Could not identify CPUS for "
+ "mode %d, ignoring\n",
+ phba->irq_chann_mode);
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
} else {
- phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ phba->cfg_irq_chann = cpumask_weight(aff_mask);
+
+ /* If no hyperthread mode, then set hdwq count to
+ * aff_mask weight as well
+ */
+ if (phba->irq_chann_mode == NHT_MODE)
+ phba->cfg_hdw_queue = phba->cfg_irq_chann;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8543 lpfc_irq_chann set to %u "
- "(numa)\n", phba->cfg_irq_chann);
+ "(mode: %d)\n", phba->cfg_irq_chann,
+ phba->irq_chann_mode);
}
} else {
if (val > LPFC_IRQ_CHANN_MAX) {
@@ -5787,7 +5849,7 @@ lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
val,
LPFC_IRQ_CHANN_MIN,
LPFC_IRQ_CHANN_MAX);
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return -EINVAL;
}
phba->cfg_irq_chann = val;
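
To make the three lpfc_irq_chann_mode values concrete, the model below mirrors what lpfc_cpumask_irq_mode_init() computes: NUMA_MODE keeps only the CPUs on the HBA's node, NHT_MODE keeps one hyperthread per physical core (the first sibling), and NORMAL_MODE leaves the mask empty so every CPU stays eligible. The topology tables are invented for illustration:

#include <stdio.h>

#define NR_CPUS 8

enum irq_chann_mode { NORMAL_MODE, NUMA_MODE, NHT_MODE };

/* Invented topology: two nodes, four cores, two threads per core.
 * node[cpu] models cpu_to_node(); first_sibling[cpu] models
 * cpumask_first(topology_sibling_cpumask(cpu)). */
static const int node[NR_CPUS]          = { 0, 0, 0, 0, 1, 1, 1, 1 };
static const int first_sibling[NR_CPUS] = { 0, 0, 2, 2, 4, 4, 6, 6 };

static void build_aff_mask(enum irq_chann_mode mode, int dev_node,
			   int aff_mask[NR_CPUS])
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* cpumask_clear() */
		aff_mask[cpu] = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		switch (mode) {
		case NUMA_MODE:		/* CPUs local to the HBA's node */
			if (node[cpu] == dev_node)
				aff_mask[cpu] = 1;
			break;
		case NHT_MODE:		/* one thread per physical core */
			aff_mask[first_sibling[cpu]] = 1;
			break;
		default:		/* NORMAL_MODE: mask stays empty */
			break;
		}
	}
}

int main(void)
{
	int mask[NR_CPUS], cpu;

	build_aff_mask(NHT_MODE, 0, mask);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask[cpu])
			printf("cpu%d ", cpu);	/* cpu0 cpu2 cpu4 cpu6 */
	printf("\n");
	return 0;
}
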
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 196f6ae9952e..69d4710d95a0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -461,7 +461,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
- (fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 4daae90e0c99..ae0a8252128c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2429,7 +2429,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
return 0;
if (dent == phba->debug_InjErrLBA) {
- if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
+ if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
+ (dstbuf[2] == 'f'))
tmp = (uint64_t)(-1);
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 80d1e661b0d4..3d670568a276 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7936,19 +7936,13 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (unlikely(!pring))
return;
- if ((phba->pport->load_flag & FC_UNLOADING))
+ if (phba->pport->load_flag & FC_UNLOADING)
return;
+
spin_lock_irq(&phba->hbalock);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
- if ((phba->pport->load_flag & FC_UNLOADING)) {
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
- return;
- }
-
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
@@ -8514,6 +8508,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
spin_unlock_irq(shost->host_lock);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
goto dropit;
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f5952f8cd4b5..4084f7f2b821 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1360,14 +1360,14 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
* __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: Index for the lpfc_fcf_record.
* @new_fcf_record: pointer to hba fcf record.
*
* This routine updates the driver FCF priority record from the new HBA FCF
- * record. This routine is called with the host lock held.
+ * record. The hbalock is asserted held in the code path calling this
+ * routine.
**/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
@@ -1376,8 +1376,6 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
{
struct lpfc_fcf_pri *fcf_pri;
- lockdep_assert_held(&phba->hbalock);
-
fcf_pri = &phba->fcf.fcf_pri[fcf_index];
fcf_pri->fcf_rec.fcf_index = fcf_index;
/* FCF record priority */
@@ -1455,7 +1453,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
*
* This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
- * routine is called with the host lock held.
+ * routine is called with the hbalock held.
**/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 10c5d1c3122e..6dfff0376547 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3541,7 +3541,7 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_UER_SHIFT 0
#define lpfc_mbx_set_feature_UER_MASK 0x00000001
#define lpfc_mbx_set_feature_UER_WORD word6
-#define lpfc_mbx_set_feature_mds_SHIFT 0
+#define lpfc_mbx_set_feature_mds_SHIFT 2
#define lpfc_mbx_set_feature_mds_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_WORD word6
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ea99483345f2..69a5249e007a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6020,29 +6020,6 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
- * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node
- * @phba: Pointer to HBA context object.
- *
- **/
-static void
-lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
-{
- unsigned int cpu, numa_node;
- struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
-
- cpumask_clear(numa_mask);
-
- /* Check if we're a NUMA architecture */
- numa_node = dev_to_node(&phba->pcidev->dev);
- if (numa_node == NUMA_NO_NODE)
- return;
-
- for_each_possible_cpu(cpu)
- if (cpu_to_node(cpu) == numa_node)
- cpumask_set_cpu(cpu, numa_mask);
-}
-
-/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6480,7 +6457,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
phba->sli4_hba.curr_disp_cpu = 0;
- lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6688,6 +6664,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
#endif
/* Not supported for NVMET */
phba->cfg_xri_rebalancing = 0;
+ if (phba->irq_chann_mode == NHT_MODE) {
+ phba->cfg_irq_chann =
+ phba->sli4_hba.num_present_cpu;
+ phba->cfg_hdw_queue =
+ phba->sli4_hba.num_present_cpu;
+ phba->irq_chann_mode = NORMAL_MODE;
+ }
break;
}
}
@@ -7029,7 +7012,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
- cpumask_clear(&phba->sli4_hba.numa_mask);
+ cpumask_clear(&phba->sli4_hba.irq_aff_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -11284,11 +11267,12 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
* @offline: true, cpu is going offline. false, cpu is coming online.
*
* If cpu is going offline, we'll try our best effort to find the next
- * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
+ * affinities.
*
- * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu.
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
*
- * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
* PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
*
**/
@@ -11298,14 +11282,14 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
struct lpfc_vector_map_info *cpup;
struct cpumask *aff_mask;
unsigned int cpu_select, cpu_next, idx;
- const struct cpumask *numa_mask;
+ const struct cpumask *orig_mask;
- if (!phba->cfg_irq_numa)
+ if (phba->irq_chann_mode == NORMAL_MODE)
return;
- numa_mask = &phba->sli4_hba.numa_mask;
+ orig_mask = &phba->sli4_hba.irq_aff_mask;
- if (!cpumask_test_cpu(cpu, numa_mask))
+ if (!cpumask_test_cpu(cpu, orig_mask))
return;
cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -11314,9 +11298,9 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
return;
if (offline) {
- /* Find next online CPU on NUMA node */
- cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+ /* Find next online CPU on original mask */
+ cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
@@ -11431,7 +11415,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
- const struct cpumask *numa_mask = NULL;
+ const struct cpumask *aff_mask = NULL;
unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
struct lpfc_hba_eq_hdl *eqhdl;
const struct cpumask *maskp;
@@ -11441,16 +11425,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
- cpu_cnt = cpumask_weight(numa_mask);
+ if (phba->irq_chann_mode != NORMAL_MODE)
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ if (aff_mask) {
+ cpu_cnt = cpumask_weight(aff_mask);
vectors = min(phba->cfg_irq_chann, cpu_cnt);
- /* cpu: iterates over numa_mask including offline or online
- * cpu_select: iterates over online numa_mask to set affinity
+ /* cpu: iterates over aff_mask including offline or online
+ * cpu_select: iterates over online aff_mask to set affinity
*/
- cpu = cpumask_first(numa_mask);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ cpu = cpumask_first(aff_mask);
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else {
flags |= PCI_IRQ_AFFINITY;
}
@@ -11484,7 +11470,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
eqhdl->irq = pci_irq_vector(phba->pcidev, index);
- if (phba->cfg_irq_numa) {
+ if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
lpfc_irq_set_aff(eqhdl, cpu_select);
@@ -11494,11 +11480,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_CPU_FIRST_IRQ,
cpu);
- /* Iterate to next offline or online cpu in numa_mask */
- cpu = cpumask_next(cpu, numa_mask);
+ /* Iterate to next offline or online cpu in aff_mask */
+ cpu = cpumask_next(cpu, aff_mask);
- /* Find next online cpu in numa_mask to set affinity */
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ /* Find next online cpu in aff_mask to set affinity */
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else if (vectors == 1) {
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
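
The hotplug path documented above comes down to one decision on the offline side: starting just past the dying CPU, wrap through the affinity mask (cpumask_next_wrap() plus lpfc_next_online_cpu() in the driver) and hand the vector to the first CPU that is both in the mask and still online. A compact model of that choice, with toy tables as in the earlier sketch:

#include <stdio.h>

#define NR_CPUS 8

static const int in_mask[NR_CPUS] = { 1, 1, 0, 0, 1, 1, 0, 0 };
static int online[NR_CPUS]        = { 1, 1, 1, 1, 1, 1, 1, 1 };

/* Model of lpfc_irq_rebalance()'s offline branch: pick the next
 * CPU after "dying_cpu", wrapping, that is in the affinity mask
 * and still online; -1 means leave the affinity untouched. */
static int pick_migration_target(int dying_cpu)
{
	for (int i = 1; i < NR_CPUS; i++) {
		int cpu = (dying_cpu + i) % NR_CPUS;

		if (in_mask[cpu] && online[cpu])
			return cpu;
	}
	return -1;
}

int main(void)
{
	online[1] = 0;	/* cpu1 is going offline */
	printf("move cpu1's vectors to cpu%d\n", pick_migration_target(1));
	return 0;	/* prints cpu4: cpu2/cpu3 are not in the mask */
}
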
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e35b52b66d6c..e34e0f11bfdd 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1378,7 +1378,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*/
if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
- phba->host_gp = &phba->mbox->us.s2.host[0];
+ phba->host_gp = (struct lpfc_hgp __iomem *)
+ &phba->mbox->us.s2.host[0];
phba->hbq_put = NULL;
offset = (uint8_t *)&phba->mbox->us.s2.host -
(uint8_t *)phba->slim2p.virt;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index b46ba70f78da..b16c087ba272 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1654,11 +1654,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (vport->load_flag & FC_UNLOADING) {
- ret = -ENODEV;
- goto out_fail;
- }
-
if (unlikely(vport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
@@ -2491,38 +2486,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (oldrport) {
- /* New remoteport record does not guarantee valid
- * host private memory area.
- */
- if (oldrport == remote_port->private) {
- /* Same remoteport - ndlp should match.
- * Just reuse.
- */
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebind lport to current "
- "remoteport x%px wwpn 0x%llx, "
- "Data: x%x x%x x%px x%px x%x "
- " x%06x\n",
- remote_port,
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- oldrport->ndlp,
- ndlp,
- ndlp->nlp_type,
- ndlp->nlp_DID);
-
- /* It's a complete rebind only if the driver
- * is registering with the same ndlp. Otherwise
- * the driver likely executed a node swap
- * prior to this registration and the ndlp to
- * remoteport binding needs to be redone.
- */
- if (prev_ndlp == ndlp)
- return 0;
-
- }
/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 32eb5e873e9b..88760416a8cb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1030,11 +1030,6 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
- if (phba->pport->load_flag & FC_UNLOADING) {
- rc = -ENODEV;
- goto aerr;
- }
-
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -1157,9 +1152,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
- if (phba->pport->load_flag & FC_UNLOADING)
- return;
-
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9e21c4f3b009..25653baba367 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -535,7 +535,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
if (count > eq->EQ_max_eqe)
eq->EQ_max_eqe = count;
- eq->queue_claimed = 0;
+ xchg(&eq->queue_claimed, 0);
rearm_and_exit:
/* Always clear the EQ. */
@@ -1245,8 +1245,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
+ * This function is called to release the driver iocb object
+ * to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
* The sqlq structure that holds the xritag and phys and virtual
@@ -1256,7 +1256,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* this IO was aborted then the sglq entry it put on the
* lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
* IO has good status or fails for any other reason then the sglq
- * entry is added to the free list (lpfc_els_sgl_list).
+ * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
+ * asserted held in the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
@@ -1266,8 +1267,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
unsigned long iflag = 0;
struct lpfc_sli_ring *pring;
- lockdep_assert_held(&phba->hbalock);
-
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
@@ -1330,18 +1329,17 @@ out:
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
- * does not change for each use of the iocb object. This function
- * clears all other fields of the iocb object when it is freed.
+ * This function is called to release the driver iocb object to the
+ * iocb pool. The iotag in the iocb object does not change for each
+ * use of the iocb object. This function clears all other fields of
+ * the iocb object when it is freed. The hbalock is asserted held in
+ * the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
- lockdep_assert_held(&phba->hbalock);
-
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
@@ -1786,17 +1784,17 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* @nextiocb: Pointer to driver iocb object which need to be
* posted to firmware.
*
- * This function is called with hbalock held to post a new iocb to
- * the firmware. This function copies the new iocb to ring iocb slot and
- * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * This function is called to post a new iocb to the firmware. This
+ * function copies the new iocb to ring iocb slot and updates the
+ * ring pointers. It adds the new iocb to txcmplq if there is
* a completion call back for this iocb else the function will free the
- * iocb object.
+ * iocb object. The hbalock is asserted held in the code path calling
+ * this routine.
**/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
- lockdep_assert_held(&phba->hbalock);
/*
* Set up an iotag
*/
@@ -11284,6 +11282,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* request, this function issues abort out unconditionally. This function is
* called with hbalock held. The function returns 0 when it fails due to
* memory allocation failure or when the command iocb is an abort request.
+ * The hbalock is asserted held in the code path calling this routine.
**/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
@@ -11297,8 +11296,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflags;
struct lpfc_nodelist *ndlp;
- lockdep_assert_held(&phba->hbalock);
-
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
@@ -13808,7 +13805,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
"0369 No entry from completion queue "
"qid=%d\n", cq->queue_id);
- cq->queue_claimed = 0;
+ xchg(&cq->queue_claimed, 0);
rearm_and_exit:
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
@@ -14389,7 +14386,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
int ecount = 0;
int hba_eqidx;
struct lpfc_eq_intr_info *eqi;
- uint32_t icnt;
/* Get the driver's phba structure from the dev_id */
hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
@@ -14417,11 +14413,12 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- eqi = phba->sli4_hba.eq_info;
- icnt = this_cpu_inc_return(eqi->icnt);
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+ eqi->icnt++;
+
fpeq->last_cpu = raw_smp_processor_id();
- if (icnt > LPFC_EQD_ISR_TRIGGER &&
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
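
On the two queue_claimed hunks above: unlike a plain store, xchg() is a fully ordered atomic on Linux, so all of the EQE/CQE processing before it is guaranteed to be visible before the queue is published as unclaimed. A runnable C11 analogue of that claim/process/release shape (the names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_claimed;
static int entries_processed;

/* Claim the queue, consume entries, then release it with a fully
 * ordered exchange, the userspace analogue of the kernel's xchg():
 * the processing cannot be reordered past the release. */
static void process_queue(void)
{
	int expected = 0;

	if (!atomic_compare_exchange_strong(&queue_claimed, &expected, 1))
		return;			/* another context owns the queue */

	entries_processed++;		/* stand-in for consuming entries */

	(void)atomic_exchange(&queue_claimed, 0);	/* ordered release */
}

int main(void)
{
	process_queue();
	printf("processed=%d claimed=%d\n", entries_processed,
	       atomic_load(&queue_claimed));	/* processed=1 claimed=0 */
	return 0;
}
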
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 8da7429e385a..4decb53d81c3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -920,7 +920,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
- struct cpumask numa_mask;
+ struct cpumask irq_aff_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ca40c47cfbe0..ab0bc26c098d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.0"
+#define LPFC_DRIVER_VERSION "12.8.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 8443f2f35be2..8f918df631bf 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -302,8 +302,8 @@ static struct pci_driver megaraid_pci_driver = {
// definitions for the device attributes for exporting logical drive number
// for a scsi address (Host, Channel, Id, Lun)
-DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
- NULL);
+static DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
+ NULL);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_shost_attrs[] = {
@@ -312,7 +312,7 @@ static struct device_attribute *megaraid_shost_attrs[] = {
};
-DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+static DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_sdev_attrs[] = {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 83d8c4cb1ad5..af2c7a2a9565 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.713.01.00-rc1"
-#define MEGASAS_RELDATE "Dec 27, 2019"
+#define MEGASAS_VERSION "07.714.04.00-rc1"
+#define MEGASAS_RELDATE "Apr 14, 2020"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -511,7 +511,7 @@ union MR_PROGRESS {
*/
struct MR_PD_PROGRESS {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
@@ -537,7 +537,7 @@ struct MR_PD_PROGRESS {
};
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index babe85d7b537..00668335c2af 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -81,7 +81,7 @@ int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
-int rdpq_enable = 1;
+static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
@@ -89,7 +89,7 @@ unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
-unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
@@ -1982,9 +1982,9 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
if (is_target_prop) {
tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
- if (tgt_device_qd &&
- (tgt_device_qd <= instance->host->can_queue))
- device_qd = tgt_device_qd;
+ if (tgt_device_qd)
+ device_qd = min(instance->host->can_queue,
+ (int)tgt_device_qd);
}
if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 89c3685f5163..3b3d04d7671f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -85,7 +85,7 @@ u32 mega_mod64(u64 dividend, u32 divisor)
*
* @return quotient
**/
-u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+static u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
u32 remainder;
u64 d;
@@ -367,7 +367,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
return 1;
}
-u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
@@ -417,7 +417,7 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
* div_error - Divide error code.
*/
-u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+static u32 mr_spanset_get_span_block(struct megasas_instance *instance,
u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
@@ -642,7 +642,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
}
/* This Function will return Phys arm */
-u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -785,7 +785,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
* span - Span number
* block - Absolute Block number in the physical disk
*/
-u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_DRV_RAID_MAP_ALL *map)
@@ -1342,7 +1342,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
}
}
-u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+static u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
struct LD_LOAD_BALANCE_INFO *lbInfo,
struct IO_REQUEST_INFO *io_info,
struct MR_DRV_RAID_MAP_ALL *drv_map)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b2ad96564484..319f241da4b6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -176,7 +176,7 @@ static inline bool megasas_check_same_4gb_region
* megasas_enable_intr_fusion - Enables interrupts
* @regs: MFI register set
*/
-void
+static void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
@@ -198,7 +198,7 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
* megasas_disable_intr_fusion - Disables interrupt
* @regs: MFI register set
*/
-void
+static void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
u32 mask = 0xFFFFFFFF;
@@ -2070,7 +2070,6 @@ static bool
megasas_is_prp_possible(struct megasas_instance *instance,
struct scsi_cmnd *scmd, int sge_count)
{
- int i;
u32 data_length = 0;
struct scatterlist *sg_scmd;
bool build_prp = false;
@@ -2099,63 +2098,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
build_prp = true;
}
-/*
- * Below code detects gaps/holes in IO data buffers.
- * What does holes/gaps mean?
- * Any SGE except first one in a SGL starts at non NVME page size
- * aligned address OR Any SGE except last one in a SGL ends at
- * non NVME page size boundary.
- *
- * Driver has already informed block layer by setting boundary rules for
- * bio merging done at NVME page size boundary calling kernel API
- * blk_queue_virt_boundary inside slave_config.
- * Still there is possibility of IO coming with holes to driver because of
- * IO merging done by IO scheduler.
- *
- * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
- * IO scheduling so no IO merging.
- *
- * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
- * then sending IOs with holes.
- *
- * Though driver can request block layer to disable IO merging by calling-
- * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
- * user may tune sysfs parameter- nomerges again to 0 or 1.
- *
- * If in future IO scheduling is enabled with SCSI BLK MQ,
- * this algorithm to detect holes will be required in driver
- * for SCSI BLK MQ enabled case as well.
- *
- *
- */
- scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
- if ((i != 0) && (i != (sge_count - 1))) {
- if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
- mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == 0)) {
- if ((mega_mod64((sg_dma_address(sg_scmd) +
- sg_dma_len(sg_scmd)),
- mr_nvme_pg_size))) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == (sge_count - 1))) {
- if (mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
- }
-
return build_prp;
}
@@ -4230,7 +4172,7 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+static void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
bool return_ioctl)
{
int j;
@@ -4238,8 +4180,9 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid;
- bool refire_cmd = 0;
+ bool refire_cmd = false;
u8 result;
u32 opcode = 0;
@@ -4305,6 +4248,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
result = COMPLETE_CMD;
}
+ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
+ result = RETURN_CMD;
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4533,7 +4481,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) {
dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type);
- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex);
@@ -4713,12 +4660,12 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
"attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
@@ -4783,12 +4730,12 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d57ecc7f88d8..30de4b01f703 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -774,7 +774,7 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_CPU_AFFINITY_MASK {
union {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u8 hw_path:1;
u8 cpu0:1;
u8 cpu1:1;
@@ -866,7 +866,7 @@ struct MR_LD_RAID {
__le16 seqNum;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 ldSyncRequired:1;
u32 regTypeReqOnReadIsValid:1;
u32 isEPD:1;
@@ -889,7 +889,7 @@ struct {
/* 0x30 - 0x33, Logical block size for the LD */
u32 logical_block_length;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
u32 ld_pi_exp:4;
/* 0x34, LOGICAL BLOCKS PER PHYSICAL
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 84fb3fbdb0ca..e76d994dbed3 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -7,4 +7,5 @@ mpt3sas-y += mpt3sas_base.o \
mpt3sas_transport.o \
mpt3sas_ctl.o \
mpt3sas_trigger_diag.o \
- mpt3sas_warpdrive.o
+ mpt3sas_warpdrive.o \
+ mpt3sas_debugfs.o
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 663782bb790d..beaea1933f5c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -413,7 +413,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2SGESimple32_t *sgel, *sgel_next;
u32 sgl_flags, sge_chain_count = 0;
- bool is_write = 0;
+ bool is_write = false;
u16 i = 0;
void __iomem *buffer_iomem;
phys_addr_t buffer_iomem_phys;
@@ -482,7 +482,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
if (le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
- is_write = 1;
+ is_write = true;
for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
@@ -2806,58 +2806,38 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
- u64 required_mask, coherent_mask;
struct sysinfo s;
- /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
- int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
-
- if (ioc->is_mcpu_endpoint)
- goto try_32bit;
+ int dma_mask;
- required_mask = dma_get_required_mask(&pdev->dev);
- if (sizeof(dma_addr_t) == 4 || required_mask == 32)
- goto try_32bit;
-
- if (ioc->dma_mask)
- coherent_mask = DMA_BIT_MASK(dma_mask);
+ if (ioc->is_mcpu_endpoint ||
+ sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
+ dma_get_required_mask(&pdev->dev) <= 32)
+ dma_mask = 32;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+ dma_mask = 63;
else
- coherent_mask = DMA_BIT_MASK(32);
+ dma_mask = 64;
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, coherent_mask))
- goto try_32bit;
-
- ioc->base_add_sg_single = &_base_add_sg_single_64;
- ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = dma_mask;
- goto out;
-
- try_32bit:
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
return -ENODEV;
- ioc->base_add_sg_single = &_base_add_sg_single_32;
- ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- ioc->dma_mask = 32;
- out:
+ if (dma_mask > 32) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ } else {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ }
+
si_meminfo(&s);
ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->dma_mask, convert_to_kb(s.totalram));
+ dma_mask, convert_to_kb(s.totalram));
return 0;
}
-static int
-_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
- struct pci_dev *pdev)
-{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- return -ENODEV;
- }
- return 0;
-}
-
/**
* _base_check_enable_msix - checks MSIX capability.
* @ioc: per adapter object
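
The reworked _base_config_dma_addressing() above replaces the old try-64-then-fall-back-to-32 flow with a single up-front choice of 32, 63, or 64 bits, applied to both the streaming and coherent masks in one call pair. A userspace model of just that selection ladder (the parameters mirror the ioc fields the driver consults):

#include <stdbool.h>
#include <stdio.h>

/* Model of the dma_mask choice: 32 bit for MCPU endpoints, forced
 * 32-bit mode, 32-bit dma_addr_t, or a small required mask; 63 bit
 * for SAS3/SAS35 (MPI versions above MPI2); otherwise 64 bit. */
static int pick_dma_mask(bool is_mcpu_endpoint, bool use_32bit_dma,
			 int dma_addr_bytes, int required_mask_bits,
			 bool newer_than_mpi2)
{
	if (is_mcpu_endpoint || use_32bit_dma ||
	    dma_addr_bytes == 4 || required_mask_bits <= 32)
		return 32;
	if (newer_than_mpi2)
		return 63;
	return 64;
}

int main(void)
{
	printf("%d\n", pick_dma_mask(false, false, 8, 64, true));  /* 63 */
	printf("%d\n", pick_dma_mask(false, true,  8, 64, true));  /* 32 */
	printf("%d\n", pick_dma_mask(false, false, 8, 64, false)); /* 64 */
	return 0;
}
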
@@ -4827,8 +4807,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
int j = 0;
+ int dma_alloc_count = 0;
struct chain_tracker *ct;
- struct reply_post_struct *rps;
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -4870,29 +4851,34 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->reply_post) {
- do {
- rps = &ioc->reply_post[i];
- if (rps->reply_post_free) {
- dma_pool_free(
- ioc->reply_post_free_dma_pool,
- rps->reply_post_free,
- rps->reply_post_free_dma);
- dexitprintk(ioc,
- ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
- rps->reply_post_free));
- rps->reply_post_free = NULL;
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ for (i = 0; i < count; i++) {
+ if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
+ && dma_alloc_count) {
+ if (ioc->reply_post[i].reply_post_free) {
+ dma_pool_free(
+ ioc->reply_post_free_dma_pool,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post[i].reply_post_free_dma);
+ dexitprintk(ioc, ioc_info(ioc,
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->reply_post[i].reply_post_free));
+ ioc->reply_post[i].reply_post_free =
+ NULL;
+ }
+ --dma_alloc_count;
}
- } while (ioc->rdpq_array_enable &&
- (++i < ioc->reply_queue_count));
+ }
+ dma_pool_destroy(ioc->reply_post_free_dma_pool);
if (ioc->reply_post_free_array &&
ioc->rdpq_array_enable) {
dma_pool_free(ioc->reply_post_free_array_dma_pool,
- ioc->reply_post_free_array,
- ioc->reply_post_free_array_dma);
+ ioc->reply_post_free_array,
+ ioc->reply_post_free_array_dma);
ioc->reply_post_free_array = NULL;
}
dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
- dma_pool_destroy(ioc->reply_post_free_dma_pool);
kfree(ioc->reply_post);
}
@@ -4902,8 +4888,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
- if (ioc->pcie_sgl_dma_pool)
- dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -4915,7 +4900,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
kfree(ioc->hpr_lookup);
+ ioc->hpr_lookup = NULL;
kfree(ioc->internal_lookup);
+ ioc->internal_lookup = NULL;
if (ioc->chain_lookup) {
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
@@ -4935,7 +4922,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * is_MSB_are_same - checks whether all reply queues in a set are
+ * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
 * sharing the same upper 32 bits of their base memory address.
* @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
@@ -4945,7 +4932,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
*/
static int
-is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
+mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
{
long reply_pool_end_address;
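
The renamed check answers a single question: do the first and last bytes of the pool share the same upper 32 address bits, i.e. does the whole pool sit inside one 4GB region? A runnable model of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Model of mpt3sas_check_same_4gb_region(): a pool lies in one
 * 4GB region iff its start and end addresses agree in the upper
 * 32 bits. */
static int same_4gb_region(uint64_t start, uint32_t pool_sz)
{
	uint64_t end = start + pool_sz - 1;

	return (start >> 32) == (end >> 32);
}

int main(void)
{
	/* 64 KB pool ending just below 4GB: one region. */
	printf("%d\n", same_4gb_region(0xFFFE0000ULL, 0x10000)); /* 1 */
	/* Same size pool straddling the 4GB line: two regions. */
	printf("%d\n", same_4gb_region(0xFFFF8000ULL, 0x10000)); /* 0 */
	return 0;
}
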
@@ -4959,6 +4946,88 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
}
/**
+ * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
+ * for reply queues.
+ * @ioc: per adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
+{
+ int i = 0;
+ u32 dma_alloc_count = 0;
+ int reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
+
+ ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
+ GFP_KERNEL);
+ if (!ioc->reply_post)
+ return -ENOMEM;
+ /*
+ * For INVADER_SERIES, each set of 8 reply queues (0-7, 8-15, ..), and for
+ * VENTURA_SERIES, each set of 16 reply queues (0-15, 16-31, ..), must lie
+ * within a 4GB boundary, i.e. the reply queues in a set must share the
+ * same upper 32 bits of their memory address. So the driver allocates
+ * the DMA'able memory for the reply queues accordingly. The driver
+ * applies the VENTURA_SERIES limitation to INVADER_SERIES as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ ioc->reply_post_free_dma_pool =
+ dma_pool_create("reply_post_free pool",
+ &ioc->pdev->dev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+ if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
+ ioc->reply_post[i].reply_post_free =
+ dma_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free)
+ return -ENOMEM;
+ /*
+ * Each RDPQ pool chunk must satisfy the 4GB boundary
+ * restriction:
+ * 1) Check whether the resources allocated for the chunk
+ * lie in the same 4GB range.
+ * 2) If #1 is true, continue with 64-bit DMA.
+ * 3) If #1 is false, return -EAGAIN: the caller frees all
+ * the resources, sets the DMA mask to 32 bits, and
+ * reallocates.
+ */
+ if (!mpt3sas_check_same_4gb_region(
+ (long)ioc->reply_post[i].reply_post_free, sz)) {
+ dinitprintk(ioc,
+ ioc_err(ioc, "bad Replypost free pool(0x%p) "
+ "reply_post_free_dma = (0x%llx)\n",
+ ioc->reply_post[i].reply_post_free,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ return -EAGAIN;
+ }
+ memset(ioc->reply_post[i].reply_post_free, 0,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK *
+ reply_post_free_sz);
+ dma_alloc_count--;
+
+ } else {
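+ /* Subsequent queues in this chunk sit reply_post_free_sz
+ * bytes past the previous queue, in both the CPU and DMA
+ * address spaces.
+ */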
+ ioc->reply_post[i].reply_post_free =
+ (Mpi2ReplyDescriptorsUnion_t *)
+ ((long)ioc->reply_post[i-1].reply_post_free
+ + reply_post_free_sz);
+ ioc->reply_post[i].reply_post_free_dma =
+ (dma_addr_t)
+ (ioc->reply_post[i-1].reply_post_free_dma +
+ reply_post_free_sz);
+ }
+ }
+ return 0;
+}
+
+/**
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
@@ -4972,10 +5041,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz;
+ u32 rdpq_sz = 0;
u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
int i, j;
+ int ret = 0;
struct chain_tracker *ct;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5129,54 +5200,28 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t);
-
- sz = reply_post_free_sz;
+ rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
- sz *= ioc->reply_queue_count;
-
- ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
- (ioc->reply_queue_count):1,
- sizeof(struct reply_post_struct), GFP_KERNEL);
-
- if (!ioc->reply_post) {
- ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
- goto out;
- }
- ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
- &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_post_free_dma_pool) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
- goto out;
- }
- i = 0;
- do {
- ioc->reply_post[i].reply_post_free =
- dma_pool_zalloc(ioc->reply_post_free_dma_pool,
- GFP_KERNEL,
- &ioc->reply_post[i].reply_post_free_dma);
- if (!ioc->reply_post[i].reply_post_free) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
- goto out;
- }
- dinitprintk(ioc,
- ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post_queue_depth,
- 8, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
- (u64)ioc->reply_post[i].reply_post_free_dma));
- total_sz += sz;
- } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
-
- if (ioc->dma_mask > 32) {
- if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
- ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
- pci_name(ioc->pdev));
- goto out;
+ rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
+ ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
+ if (ret == -EAGAIN) {
+ /*
+ * Free the allocated (bad) RDPQ memory pools, change the DMA
+ * coherent mask to 32 bits, and reallocate the RDPQ pools.
+ */
+ _base_release_memory_pools(ioc);
+ ioc->use_32bit_dma = true;
+ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+ ioc_err(ioc,
+ "32 DMA mask failed %s\n", pci_name(ioc->pdev));
+ return -ENODEV;
}
- }
-
+ if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
+ return -ENOMEM;
+ } else if (ret == -ENOMEM)
+ return -ENOMEM;
+ total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
+ DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
ioc->scsiio_depth = ioc->hba_queue_depth -
ioc->hi_priority_depth - ioc->internal_depth;
@@ -5188,7 +5233,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
ioc->shost->can_queue));
-
 /* contiguous pool for request and chains, 16 byte align, one extra
 * frame for smid=0
*/
@@ -5405,7 +5449,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 * The actual requirement is not alignment, but that the start and end
 * DMA addresses have the same upper 32 bits.
*/
- if (!is_MSB_are_same((long)ioc->sense, sz)) {
+ if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
//Release Sense pool & Reallocate
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool);
@@ -7158,7 +7202,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->smp_affinity_enable = smp_affinity_enable;
ioc->rdpq_array_enable_assigned = 0;
- ioc->dma_mask = 0;
+ ioc->use_32bit_dma = false;
if (ioc->is_aero_ioc)
ioc->base_readl = &_base_readl_aero;
else
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index e7197150721f..4fca3939c034 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "33.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 33
+#define MPT3SAS_DRIVER_VERSION "34.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 34
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -367,6 +367,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8
#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16
#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128
+#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
/* OEM Specific Flags will come from OEM specific header files */
struct Mpi2ManufacturingPage10_t {
@@ -1026,7 +1027,6 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ir_firmware: IR firmware present
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
- * @dma_mask: used to set the consistent dma mask
* @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
* pci resource handling
* @fault_reset_work_q_name: fw fault work queue
@@ -1064,6 +1064,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
* @drv_support_bitmap: driver's supported feature bit map
+ * @use_32bit_dma: Flag to use 32 bit consistent dma mask
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -1205,7 +1206,6 @@ struct MPT3SAS_ADAPTER {
u8 ir_firmware;
int bars;
u8 mask_interrupts;
- int dma_mask;
/* fw fault handler */
char fault_reset_work_q_name[20];
@@ -1254,6 +1254,7 @@ struct MPT3SAS_ADAPTER {
u8 high_iops_queues;
u32 drv_support_bitmap;
bool enable_sdev_max_qd;
+ bool use_32bit_dma;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1471,6 +1472,8 @@ struct MPT3SAS_ADAPTER {
u16 device_remove_in_progress_sz;
u8 is_gen35_ioc;
u8 is_aero_ioc;
+ struct dentry *debugfs_root;
+ struct dentry *ioc_dump;
PUT_SMID_IO_FP_HIP put_smid_scsi_io;
PUT_SMID_IO_FP_HIP put_smid_fast_path;
PUT_SMID_IO_FP_HIP put_smid_hi_priority;
@@ -1478,6 +1481,11 @@ struct MPT3SAS_ADAPTER {
GET_MSIX_INDEX get_msix_index_for_smlio;
};
+struct mpt3sas_debugfs_buffer {
+ void *buf;
+ u32 len;
+};
+
#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1781,6 +1789,11 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_init_debugfs(void);
+void mpt3sas_exit_debugfs(void);
+
/**
* _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device
* @device_info: bitfield providing information about the device.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debugfs.c b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
new file mode 100644
index 000000000000..a6ab1db81167
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Debugfs interface Support for MPT (Message Passing Technology) based
+ * controllers.
+ *
+ * Copyright (C) 2020 Broadcom Inc.
+ *
+ * Authors: Broadcom Inc.
+ * Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+ * Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+ *
+ * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
+ *
+ **/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "mpt3sas_base.h"
+#include <linux/debugfs.h>
+
+static struct dentry *mpt3sas_debugfs_root;
+
+/*
+ * _debugfs_iocdump_read - copy ioc dump from debugfs buffer
+ * @filep: File Pointer
+ * @ubuf: Buffer to fill data
+ * @cnt: Length of the buffer
+ * @ppos: Offset in the file
+ */
+
+static ssize_t
+_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mpt3sas_debugfs_buffer *debug = filp->private_data;
+
+ if (!debug || !debug->buf)
+ return 0;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+/*
+ * _debugfs_iocdump_open : open the ioc_dump debugfs attribute file
+ */
+static int
+_debugfs_iocdump_open(struct inode *inode, struct file *file)
+{
+ struct MPT3SAS_ADAPTER *ioc = inode->i_private;
+ struct mpt3sas_debugfs_buffer *debug;
+
+ debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->buf = (void *)ioc;
+ debug->len = sizeof(struct MPT3SAS_ADAPTER);
+ file->private_data = debug;
+ return 0;
+}
+
+/*
+ * _debugfs_iocdump_release : release the ioc_dump debugfs attribute
+ * @inode: inode structure of the corresponding device
+ * @file: File pointer
+ */
+static int
+_debugfs_iocdump_release(struct inode *inode, struct file *file)
+{
+ struct mpt3sas_debugfs_buffer *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations mpt3sas_debugfs_iocdump_fops = {
+ .owner = THIS_MODULE,
+ .open = _debugfs_iocdump_open,
+ .read = _debugfs_iocdump_read,
+ .release = _debugfs_iocdump_release,
+};
+
+/*
+ * mpt3sas_init_debugfs : Create debugfs root for mpt3sas driver
+ */
+void mpt3sas_init_debugfs(void)
+{
+ mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL);
+ if (!mpt3sas_debugfs_root)
+ pr_info("mpt3sas: Cannot create debugfs root\n");
+}
+
+/*
+ * mpt3sas_exit_debugfs : Remove debugfs root for mpt3sas driver
+ */
+void mpt3sas_exit_debugfs(void)
+{
+ debugfs_remove_recursive(mpt3sas_debugfs_root);
+}
+
+/*
+ * mpt3sas_setup_debugfs : Setup debugfs per HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void
+mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ char name[64];
+
+ snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no);
+ if (!ioc->debugfs_root) {
+ ioc->debugfs_root =
+ debugfs_create_dir(name, mpt3sas_debugfs_root);
+ if (!ioc->debugfs_root) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create per adapter debugfs directory\n");
+ return;
+ }
+ }
+
+ snprintf(name, sizeof(name), "ioc_dump");
+ ioc->ioc_dump = debugfs_create_file(name, 0444,
+ ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops);
+ if (!ioc->ioc_dump) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create ioc_dump debugfs file\n");
+ debugfs_remove(ioc->debugfs_root);
+ return;
+ }
+
+ snprintf(name, sizeof(name), "host_recovery");
+ debugfs_create_u8(name, 0444, ioc->debugfs_root, &ioc->shost_recovery);
+}
+
+/*
+ * mpt3sas_destroy_debugfs : Destroy debugfs per HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ debugfs_remove_recursive(ioc->debugfs_root);
+}
+
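Once the ioc_dump attribute exists, the adapter structure can be snapshotted from userspace. A minimal reader, assuming debugfs is mounted at /sys/kernel/debug and the adapter is scsi_host0 (adjust the host number as needed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/mpt3sas/scsi_host0/ioc_dump",
                      O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* simple_read_from_buffer() on the kernel side serves the raw
         * struct MPT3SAS_ADAPTER bytes until EOF. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}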
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 04a40afe60e3..08fc4b381056 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9928,6 +9928,7 @@ static void scsih_remove(struct pci_dev *pdev)
&ioc->ioc_pg1_copy);
/* release all the volumes */
_scsih_ir_shutdown(ioc);
+ mpt3sas_destroy_debugfs(ioc);
sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
@@ -10763,8 +10764,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
/* register EEDP capabilities with SCSI layer */
- if (prot_mask > 0)
- scsi_host_set_prot(shost, prot_mask);
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, (prot_mask & 0x07));
else
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@@ -10814,6 +10815,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
scsi_scan_host(shost);
+ mpt3sas_setup_debugfs(ioc);
return 0;
out_add_shost_fail:
mpt3sas_base_detach(ioc);
@@ -11220,6 +11222,7 @@ scsih_init(void)
tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
_scsih_sas_control_complete);
+ mpt3sas_init_debugfs();
return 0;
}
@@ -11251,6 +11254,7 @@ scsih_exit(void)
if (hbas_to_enumerate != 2)
raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt3sas_transport_template);
+ mpt3sas_exit_debugfs();
}
/**
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7af9173c4925..5973eed94938 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -25,7 +25,7 @@ static const struct mvs_chip_info mvs_chips[] = {
[chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
};
-struct device_attribute *mvst_host_attrs[];
+static struct device_attribute *mvst_host_attrs[];
#define SOC_SAS_NUM 2
@@ -759,8 +759,6 @@ static DEVICE_ATTR(interrupt_coalescing,
mvs_show_interrupt_coalescing,
mvs_store_interrupt_coalescing);
-/* task handler */
-struct task_struct *mvs_th;
static int __init mvs_init(void)
{
int rc;
@@ -785,7 +783,7 @@ static void __exit mvs_exit(void)
sas_release_transport(mvs_stt);
}
-struct device_attribute *mvst_host_attrs[] = {
+static struct device_attribute *mvst_host_attrs[] = {
&dev_attr_driver_version,
&dev_attr_interrupt_coalescing,
NULL,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 7eb88fe1eb0b..aa9ae2ae8579 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4652,7 +4652,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i]->ioa_cb =
- dma_pool_alloc(
+ dma_pool_zalloc(
pinstance->control_pool,
GFP_KERNEL,
&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
@@ -4661,8 +4661,6 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
pmcraid_release_control_blocks(pinstance, i);
return -ENOMEM;
}
- memset(pinstance->cmd_list[i]->ioa_cb, 0,
- sizeof(struct pmcraid_control_block));
}
return 0;
}
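dma_pool_zalloc() allocates from a DMA pool and zero-fills the block in one call, which is what lets the explicit memset in this hunk be dropped. The equivalence, sketched with hypothetical pool and handle names:

struct pmcraid_control_block *cb;
dma_addr_t bus_addr;

/* Before: allocate, then clear by hand. */
cb = dma_pool_alloc(pool, GFP_KERNEL, &bus_addr);
if (cb)
        memset(cb, 0, sizeof(*cb));

/* After: one call allocates and zeroes. */
cb = dma_pool_zalloc(pool, GFP_KERNEL, &bus_addr);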
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index f3f399fe10c8..e163be8af965 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -355,6 +355,7 @@ struct qedf_ctx {
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
+#define QEDF_PROBING 8
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
@@ -387,7 +388,9 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
struct delayed_work grcdump_work;
+ struct delayed_work stag_work;
u32 slow_sge_ios;
u32 fast_sge_ios;
@@ -403,6 +406,7 @@ struct qedf_ctx {
u32 flogi_cnt;
u32 flogi_failed;
+ u32 flogi_pending;
/* Used for fc statistics */
struct mutex stats_mutex;
@@ -468,7 +472,7 @@ extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
-#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
+#define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 87e169dcebdb..542ba9454257 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -388,14 +388,10 @@ void qedf_restart_rport(struct qedf_rport *fcport)
mutex_lock(&lport->disc.disc_mutex);
/* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id);
- if (rdata) {
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (rdata)
fc_rport_login(rdata);
- fcport->rdata = rdata;
- } else {
- mutex_unlock(&lport->disc.disc_mutex);
- fcport->rdata = NULL;
- }
+ fcport->rdata = rdata;
}
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index e749a2dcaad7..0f6a15c1a04b 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1021,14 +1021,18 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
atomic_inc(&fcport->ios_to_queue);
if (fcport->retry_delay_timestamp) {
+ /* Take fcport->rport_lock while checking and resetting retry_delay_timestamp */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (time_after(jiffies, fcport->retry_delay_timestamp)) {
fcport->retry_delay_timestamp = 0;
} else {
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
/* If retry_delay timer is active, flow off the ML */
rc = SCSI_MLQUEUE_TARGET_BUSY;
atomic_dec(&fcport->ios_to_queue);
goto exit_qcmd;
}
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
@@ -1134,6 +1138,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int refcount;
u16 scope, qualifier = 0;
u8 fw_residual_flag = 0;
+ unsigned long flags = 0;
+ u16 chk_scope = 0;
if (!io_req)
return;
@@ -1267,16 +1273,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
/* Lower 14 bits */
qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
- if (qedf_retry_delay &&
- scope > 0 && qualifier > 0 &&
- qualifier <= 0x3FEF) {
- /* Check we don't go over the max */
- if (qualifier > QEDF_RETRY_DELAY_MAX)
- qualifier =
- QEDF_RETRY_DELAY_MAX;
- fcport->retry_delay_timestamp =
- jiffies + (qualifier * HZ / 10);
- }
+ if (qedf_retry_delay)
+ chk_scope = 1;
/* Record stats */
if (io_req->cdb_status ==
SAM_STAT_TASK_SET_FULL)
@@ -1287,6 +1285,36 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
+
+ if (chk_scope == 1) {
+ if ((scope == 1 || scope == 2) &&
+ (qualifier > 0 && qualifier <= 0x3FEF)) {
+ /* Check we don't go over the max */
+ if (qualifier > QEDF_RETRY_DELAY_MAX) {
+ qualifier = QEDF_RETRY_DELAY_MAX;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "qualifier = %d\n",
+ (fcp_rsp->retry_delay_timer &
+ 0x3FFF));
+ }
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Scope = %d and qualifier = %d",
+ scope, qualifier);
+ /* Take fcport->rport_lock to
+ * update the retry_delay_timestamp
+ */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ fcport->retry_delay_timestamp =
+ jiffies + (qualifier * HZ / 10);
+ spin_unlock_irqrestore(&fcport->rport_lock,
+ flags);
+
+ } else {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
+ scope, qualifier);
+ }
+ }
break;
default:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
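The scope/qualifier handling above follows the FCP retry-delay-timer encoding: the top two bits of the 16-bit field carry the scope and the lower 14 bits carry the qualifier in 100 ms units, which is why the deadline is computed as qualifier * HZ / 10 jiffies. A standalone sketch of that computation (illustrative names, mirroring the cap added by this patch):

#include <stdint.h>

#define RETRY_DELAY_MAX 600 /* cap at 60 seconds, in 100 ms units */

/* Turn an FCP retry_delay_timer field into an expiry time in jiffies;
 * returns 0 (no delay) for encodings qedf does not handle. */
static unsigned long retry_delay_deadline(uint16_t retry_delay_timer,
                                          unsigned long now, unsigned long hz)
{
        uint16_t scope = retry_delay_timer >> 14;        /* top 2 bits */
        uint16_t qualifier = retry_delay_timer & 0x3FFF; /* low 14 bits */

        if ((scope != 1 && scope != 2) || qualifier == 0 || qualifier > 0x3FEF)
                return 0;
        if (qualifier > RETRY_DELAY_MAX)
                qualifier = RETRY_DELAY_MAX;
        return now + qualifier * hz / 10; /* one unit = 100 ms */
}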
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 5b19f5175c5c..36b1ca2dadbb 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -28,6 +28,8 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
+static void qedf_schedule_recovery_handler(void *dev);
+static void qedf_recovery_handler(struct work_struct *work);
/*
* Driver module parameters.
@@ -282,6 +284,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
/* Set the source MAC we will use for FCoE traffic */
qedf_set_data_src_addr(qedf, fp);
+ qedf->flogi_pending = 0;
}
/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -307,6 +310,11 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/
if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
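+ /* Too many FLOGIs outstanding without an ACC: trigger a
+ * STAG change (whose handler performs a soft context
+ * reset) instead of retrying indefinitely.
+ */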
+ if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ schedule_delayed_work(&qedf->stag_work, 2);
+ return NULL;
+ }
+ qedf->flogi_pending++;
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout);
}
@@ -503,6 +511,32 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
}
+static void qedf_bw_update(void *dev)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ struct qed_link_output link;
+
+ /* Get the latest status of the link */
+ qed_ops->common->get_link(qedf->cdev, &link);
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore link update, driver getting unload.\n");
+ return;
+ }
+
+ if (link.link_up) {
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+ qedf_update_link_speed(qedf, &link);
+ else
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore bw update, link is down.\n");
+
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+ }
+}
+
static void qedf_link_update(void *dev, struct qed_link_output *link)
{
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
@@ -629,6 +663,8 @@ static u32 qedf_get_login_failures(void *cookie)
static struct qed_fcoe_cb_ops qedf_cb_ops = {
{
.link_update = qedf_link_update,
+ .bw_update = qedf_bw_update,
+ .schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
@@ -850,6 +886,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
qedf = lport_priv(lport);
+ qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
@@ -3153,7 +3190,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
{
int rc = -EINVAL;
struct fc_lport *lport;
- struct qedf_ctx *qedf;
+ struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host;
bool is_vf = false;
struct qed_ll2_params params;
@@ -3183,6 +3220,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
+ set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport;
qedf->ctlr.lp = lport;
qedf->pdev = pdev;
@@ -3197,6 +3235,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex);
mutex_init(&qedf->flush_mutex);
+ qedf->flogi_pending = 0;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@@ -3206,9 +3245,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else {
/* Init pointers during recovery */
qedf = pci_get_drvdata(pdev);
+ set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
host = lport->host;
/* Allocate mempool for qedf_io_work structs */
@@ -3227,6 +3269,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+ INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
qedf->fipvlan_retries = qedf_fipvlan_retries;
/* Set a default prio in case DCBX doesn't converge */
if (qedf_default_prio > -1) {
@@ -3281,6 +3324,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+ goto err2;
+ }
+
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
@@ -3466,6 +3516,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->lport->host->host_no);
qedf->dpc_wq = create_workqueue(host_buf);
}
+ INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
/*
* GRC dump and sysfs parameters are not reaped during the recovery
@@ -3513,6 +3564,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else
fc_fabric_login(lport);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+
/* All good */
return 0;
@@ -3538,6 +3593,11 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
+ if (qedf) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+ }
return rc;
}
@@ -3687,11 +3747,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
- struct fc_lport *lport = qedf->lport;
- struct Scsi_Host *host = lport->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_lport *lport;
+ struct Scsi_Host *host;
+ struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is null.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_PROBING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+ return;
+ }
+
+ lport = qedf->lport;
+ host = lport->host;
+ fc_host = shost_to_fc_host(host);
+
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
@@ -3762,11 +3836,64 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
fcoe->scsi_tsk_full = qedf->task_set_fulls;
}
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, stag_work.work);
+
+ if (!qedf) {
+ QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+ return;
+ }
+ QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+ qedf_ctx_soft_reset(qedf->lport);
+}
+
static void qedf_shutdown(struct pci_dev *pdev)
{
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
+/*
+ * Recovery handler code
+ */
+static void qedf_schedule_recovery_handler(void *dev)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+ schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+static void qedf_recovery_handler(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, recovery_work.work);
+
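+ /* Only one recovery pass may run at a time. */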
+ if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+ return;
+
+ /*
+ * Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qed_ops->common->recovery_prolog(qedf->cdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+ __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+ /*
+ * Reset link and dcbx to down state since we will not get a link down
+ * event from the MFW but calling __qedf_remove will essentially be a
+ * link down event.
+ */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+ clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
+
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 1f4a5fb00a05..425e665ec08b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -836,6 +836,11 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ return ERR_PTR(-ENXIO);
+ }
+
ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
if (!ep) {
QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
@@ -870,12 +875,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
}
- if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
- QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
- ret = -ENXIO;
- goto ep_conn_exit;
- }
-
ret = qedi_alloc_sq(qedi, qedi_ep);
if (ret)
goto ep_conn_exit;
@@ -1001,7 +1000,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- flush_work(&qedi_ep->offload_work);
+ if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
+ flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
@@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
break;
}
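+ /* For a graceful (non-abort) teardown, also wait out the
+ * connection's two-MSL (TIME_WAIT) interval.
+ */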
+ if (!abrt_conn)
+ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
+
qedi_ep->state = EP_STATE_DISCONN_START;
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
@@ -1218,6 +1221,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
}
iscsi_cid = (u32)path_data->handle;
+ if (iscsi_cid >= qedi->max_active_conns) {
+ ret = -EINVAL;
+ goto set_path_exit;
+ }
qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index b995b19865ca..81a307695cc9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -28,6 +28,10 @@
#include "qedi_gbl.h"
#include "qedi_iscsi.h"
+static uint qedi_qed_debug;
+module_param(qedi_qed_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)");
+
static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644);
 MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
@@ -41,7 +45,7 @@ module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
" Enable logging of SCSI requests/completions into trace buffer. (default off).");
-uint qedi_ll2_buf_size = 0x400;
+static uint qedi_ll2_buf_size = 0x400;
module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size,
"parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
@@ -658,8 +662,6 @@ exit_setup_shost:
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
- struct qedi_uio_dev *udev;
- struct qedi_uio_ctrl *uctrl;
struct skb_work_list *work;
struct ethhdr *eh;
@@ -698,9 +700,6 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
"Allowed frame ethertype [0x%x] len [0x%x].\n",
eh->h_proto, skb->len);
- udev = qedi->udev;
- uctrl = udev->uctrl;
-
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -921,7 +920,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
- snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
+ snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s",
block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en;
@@ -1302,13 +1301,13 @@ process_again:
"process already running\n");
}
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_update_sb_idx(fp->sb_info);
/* Check for more work */
rmb();
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
else
goto process_again;
@@ -1360,7 +1359,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
u16 idx;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ for (i = 0; i < qedi->int_info.msix_cnt; i++) {
idx = i * qedi->dev_info.common.num_hwfns +
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
@@ -2422,7 +2421,6 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
struct qed_ll2_params params;
- u32 dp_module = 0;
u8 dp_level = 0;
bool is_vf = false;
char host_buf[16];
@@ -2445,7 +2443,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
memset(&qed_params, 0, sizeof(qed_params));
qed_params.protocol = QED_PROTOCOL_ISCSI;
- qed_params.dp_module = dp_module;
+ qed_params.dp_module = qedi_qed_debug;
qed_params.dp_level = dp_level;
qed_params.is_vf = is_vf;
qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3337cd341d21..441a45349349 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -526,7 +526,7 @@ static struct pci_device_id qla1280_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
-DEFINE_MUTEX(qla1280_firmware_mutex);
+static DEFINE_MUTEX(qla1280_firmware_mutex);
struct qla_fw {
char *fwname;
@@ -535,7 +535,7 @@ struct qla_fw {
#define QL_NUM_FW_IMAGES 3
-struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
+static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
{"qlogic/1040.bin", NULL}, /* image 0 */
{"qlogic/1280.bin", NULL}, /* image 1 */
{"qlogic/12160.bin", NULL}, /* image 2 */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2c9e5ac24692..5d93ccc73153 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -26,7 +26,8 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
+ if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
+ ha->mpi_fw_dump_reading))
return 0;
mutex_lock(&ha->optrom_mutex);
@@ -42,6 +43,10 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
MCTP_DUMP_SIZE);
+ } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
+ rval = memory_read_from_buffer(buf, count, &off,
+ ha->mpi_fw_dump,
+ ha->mpi_fw_dump_len);
} else if (ha->fw_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_len);
@@ -79,7 +84,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_md_prep(vha);
}
ha->fw_dump_reading = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
break;
case 1:
if (ha->fw_dumped && !ha->fw_dump_reading) {
@@ -103,7 +108,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
} else {
- ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
}
break;
@@ -137,6 +141,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
vha->host_no);
}
break;
+ case 8:
+ if (!ha->mpi_fw_dump_reading)
+ break;
+ ql_log(ql_log_info, vha, 0x70e7,
+ "MPI firmware dump cleared on (%ld).\n", vha->host_no);
+ ha->mpi_fw_dump_reading = 0;
+ ha->mpi_fw_dumped = 0;
+ break;
+ case 9:
+ if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
+ ha->mpi_fw_dump_reading = 1;
+ ql_log(ql_log_info, vha, 0x70e8,
+ "Raw MPI firmware dump ready for read on (%ld).\n",
+ vha->host_no);
+ }
+ break;
}
return count;
}
@@ -207,10 +227,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
/* Checksum NVRAM. */
if (IS_FWI2_CAPABLE(ha)) {
- uint32_t *iter;
+ __le32 *iter = (__force __le32 *)buf;
uint32_t chksum;
- iter = (uint32_t *)buf;
chksum = 0;
for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
chksum += le32_to_cpu(*iter);
@@ -706,7 +725,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
@@ -724,6 +744,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
qla83xx_idc_unlock(vha, 0);
break;
+ } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ qla27xx_reset_mpi(vha);
} else {
/* Make sure FC side is not in reset */
WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
@@ -737,6 +759,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
}
+ break;
case 0x2025e:
if (!IS_P3P_TYPE(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
@@ -1898,9 +1921,8 @@ static char *mode_to_str[] = {
};
#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
-static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
{
- int rc = 0;
enum {
NO_ACTION,
MODE_CHANGE_ACCEPT,
@@ -2173,8 +2195,6 @@ static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
vha->ql2xexchoffld, vha->u_ql2xexchoffld);
break;
}
-
- return rc;
}
static ssize_t
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 97b51c477972..88c0338a2ec7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -490,7 +490,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
>> 24;
switch (loop_id) {
case 0xFC:
- loop_id = cpu_to_le16(NPH_SNS);
+ loop_id = NPH_SNS;
break;
case 0xFA:
loop_id = vha->mgmt_svr_loop_id;
@@ -691,7 +691,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
* dump and reset the chip.
*/
if (ret) {
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
rval = -EINVAL;
@@ -896,7 +896,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
* doesn't work take FCoE dump and then
* reset the chip.
*/
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
@@ -2042,7 +2042,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->loop_id = piocb_rqst->dataword;
+ fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
sp->type = SRB_FXIOCB_BCMD;
sp->name = "bsg_fx_mgmt";
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index bf1e98f11990..19005710f7f6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -115,7 +115,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
@@ -126,26 +126,26 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
- WRT_REG_WORD(&reg->mailbox9, 0);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox9, 0);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
continue;
@@ -155,15 +155,15 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat != 0x10 && stat != 0x11) {
/* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
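The register-access hunks in this file are a mechanical conversion from the upper-case WRT_REG_*/RD_REG_* macros to lower-case inline helpers whose prototypes carry __le16/__le32 types, so sparse can flag endianness mistakes at call sites. The helpers live in qla_def.h and are not part of this excerpt; a plausible shape, as a sketch:

static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
{
        writew(data, addr);
}

static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
{
        return readw(addr);
}

static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
{
        writel(data, addr);
}

static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
{
        return readl(addr);
}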
@@ -189,13 +189,13 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
int
-qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
- uint32_t ram_dwords, void **nxt)
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
+ uint32_t ram_dwords, void **nxt)
{
int rval = QLA_FUNCTION_FAILED;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
@@ -206,23 +206,23 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
@@ -231,15 +231,15 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat &= 0xff;
if (stat != 0x1 && stat != 0x2 &&
stat != 0x10 && stat != 0x11) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
@@ -254,9 +254,9 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
return rval;
}
for (j = 0; j < dwords; j++) {
- ram[i + j] =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
- chunk[j] : swab32(chunk[j]);
+ ram[i + j] = (__force __be32)
+ ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]));
}
}
@@ -265,8 +265,8 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
static int
-qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
- uint32_t cram_size, void **nxt)
+qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
+ uint32_t cram_size, void **nxt)
{
int rval;
@@ -286,16 +286,16 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
return rval;
}
-static uint32_t *
+static __be32 *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
- uint32_t count, uint32_t *buf)
+ uint32_t count, __be32 *buf)
{
- uint32_t __iomem *dmp_reg;
+ __le32 __iomem *dmp_reg;
- WRT_REG_DWORD(&reg->iobase_addr, iobase);
+ wrt_reg_dword(&reg->iobase_addr, iobase);
dmp_reg = &reg->iobase_window;
for ( ; count--; dmp_reg++)
- *buf++ = htonl(RD_REG_DWORD(dmp_reg));
+ *buf++ = htonl(rd_reg_dword(dmp_reg));
return buf;
}
@@ -303,11 +303,11 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);
 /* 100 usec delay is sufficient for hardware to pause RISC */
udelay(100);
- if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+ if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}
@@ -324,17 +324,17 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
* Driver can proceed with the reset sequence after waiting
* for a timeout period.
*/
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
@@ -342,19 +342,19 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
/* Wait for soft-reset to complete. */
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr); /* PCI Posting. */
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(10);
@@ -368,7 +368,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
}
static int
-qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
uint32_t ram_words, void **nxt)
{
int rval;
@@ -376,7 +376,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
uint16_t mb0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint16_t *dump = (uint16_t *)ha->gid_list;
+ __le16 *dump = (__force __le16 *)ha->gid_list;
rval = QLA_SUCCESS;
mb0 = 0;
@@ -399,11 +399,11 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
WRT_MAILBOX_REG(ha, reg, 4, words);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_INT) {
stat &= 0xff;
@@ -414,10 +414,10 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
} else if (stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
@@ -425,15 +425,15 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
/* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -441,7 +441,8 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
rval = mb0 & MBS_MASK;
for (idx = 0; idx < words; idx++)
- ram[cnt + idx] = swab16(dump[idx]);
+ ram[cnt + idx] =
+ cpu_to_be16(le16_to_cpu(dump[idx]));
} else {
rval = QLA_FUNCTION_FAILED;
}
@@ -453,12 +454,12 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
- uint16_t *buf)
+ __be16 *buf)
{
- uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
+ __le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
for ( ; count--; dmp_reg++)
- *buf++ = htons(RD_REG_WORD(dmp_reg));
+ *buf++ = htons(rd_reg_word(dmp_reg));
}
static inline void *
@@ -472,10 +473,10 @@ qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
}
static inline void *
-qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt;
- uint32_t *iter_reg;
+ __be32 *iter_reg;
struct qla2xxx_fce_chain *fcec = ptr;
if (!ha->fce)
@@ -499,7 +500,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -517,11 +518,11 @@ qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
-qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -539,12 +540,12 @@ qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
- uint32_t **last_chain)
+ __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -591,7 +592,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
}
static inline void *
-qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -662,7 +663,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt, que_idx;
uint8_t que_cnt;
@@ -685,13 +686,13 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
reg = ISP_QUE_REG(ha, cnt);
que_idx = cnt * 4;
mq->qregs[que_idx] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
mq->qregs[que_idx+1] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
mq->qregs[que_idx+2] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
mq->qregs[que_idx+3] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
}
return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -706,45 +707,47 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
ql_log(ql_log_warn, vha, 0xd000,
"Failed to dump firmware (%x), dump status flags (0x%lx).\n",
rval, ha->fw_dump_cap_flags);
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
} else {
ql_log(ql_log_info, vha, 0xd001,
"Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
}
+void qla2xxx_dump_fw(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ vha->hw->isp_ops->fw_dump(vha);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
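With the wrapper in place, call sites split by locking context; a sketch of the two patterns (the qla_bsg.c conversions above show the first):

/* Unlocked context: the wrapper takes hardware_lock itself. */
qla2xxx_dump_fw(vha);

/* Already under hardware_lock: call the method directly; the per-ISP
 * dump routines now lockdep_assert_held(&ha->hardware_lock). */
ha->isp_ops->fw_dump(vha);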
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2300_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2300_fw_dump *fw;
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd002,
"No buffer available for dump.\n");
- goto qla2300_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -752,19 +755,19 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2300_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp23;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2300(ha)) {
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -772,74 +775,74 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
rval = QLA_FUNCTION_TIMEOUT;
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.req_q_in;
- for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
cnt++, dmp_reg++)
- fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
cnt++, dmp_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x40);
+ wrt_reg_word(&reg->ctrl_status, 0x40);
qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x50);
+ wrt_reg_word(&reg->ctrl_status, 0x50);
qla2xxx_read_window(reg, 48, fw->dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2800);
+ wrt_reg_word(&reg->pcr, 0x2800);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2A00);
+ wrt_reg_word(&reg->pcr, 0x2A00);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2C00);
+ wrt_reg_word(&reg->pcr, 0x2C00);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2E00);
+ wrt_reg_word(&reg->pcr, 0x2E00);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset RISC. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
@@ -860,12 +863,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Get RISC SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
- sizeof(fw->risc_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->risc_ram), &nxt);
/* Get stack SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
- sizeof(fw->stack_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->stack_ram), &nxt);
/* Get data SRAM. */
if (rval == QLA_SUCCESS)
@@ -876,48 +879,31 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_copy_queues(ha, nxt);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2300_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
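The loop-bound change repeated throughout these functions — sizeof(arr) / 2 becoming ARRAY_SIZE(arr) — removes a hard-coded element width: ARRAY_SIZE() derives the count from the array's type, so the loops stay correct even if an element type ever changes size. An illustrative kernel-context fragment (the names here are hypothetical):

        #include <linux/kernel.h>       /* ARRAY_SIZE() */
        #include <asm/byteorder.h>      /* be16_to_cpu() */

        static __be16 mailbox_reg[32];

        static u32 sum_mailboxes(void)
        {
                u32 cnt, sum = 0;

                for (cnt = 0; cnt < ARRAY_SIZE(mailbox_reg); cnt++)
                        sum += be16_to_cpu(mailbox_reg[cnt]);
                return sum;
        }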
/**
* qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2100_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt, timer;
- uint16_t risc_address;
- uint16_t mb0, mb2;
+ uint16_t risc_address = 0;
+ uint16_t mb0 = 0, mb2 = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2100_fw_dump *fw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- risc_address = 0;
- mb0 = mb2 = 0;
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd004,
"No buffer available for dump.\n");
- goto qla2100_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -925,17 +911,17 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2100_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp21;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -944,61 +930,61 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2100.mailbox0;
for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
if (cnt == 8)
dmp_reg = &reg->u_end.isp2200.mailbox8;
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
}
dmp_reg = &reg->u.isp2100.unused_2[0];
- for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
- fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
+ fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2100);
+ wrt_reg_word(&reg->pcr, 0x2100);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2300);
+ wrt_reg_word(&reg->pcr, 0x2300);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2500);
+ wrt_reg_word(&reg->pcr, 0x2500);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2700);
+ wrt_reg_word(&reg->pcr, 0x2700);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset the ISP. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
}
for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
@@ -1011,11 +997,11 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Pause RISC. */
if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
- (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
+ (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -1025,13 +1011,13 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS) {
/* Set memory configuration and timing. */
if (IS_QLA2100(ha))
- WRT_REG_WORD(&reg->mctr, 0xf1);
+ wrt_reg_word(&reg->mctr, 0xf1);
else
- WRT_REG_WORD(&reg->mctr, 0xf2);
- RD_REG_WORD(&reg->mctr); /* PCI Posting. */
+ wrt_reg_word(&reg->mctr, 0xf2);
+ rd_reg_word(&reg->mctr); /* PCI Posting. */
/* Release RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
}
}
@@ -1041,29 +1027,29 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
}
- for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
cnt++, risc_address++) {
WRT_MAILBOX_REG(ha, reg, 1, risc_address);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer != 0; timer--) {
/* Check for pending interrupts. */
- if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+ if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
mb0 = RD_MAILBOX_REG(ha, reg, 0);
mb2 = RD_MAILBOX_REG(ha, reg, 2);
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -1080,48 +1066,35 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2100_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
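qla2100_fw_dump() pulls RISC RAM out one word at a time: it loads MBC_READ_RAM_WORD and an address into the mailbox registers, rings the chip with HCCR_SET_HOST_INT, then spins on istatus/semaphore until the answer lands in mailboxes 0 and 2. The generic shape of that poll, with a hypothetical helper in place of the qla2xxx accessors (ISR_RISC_INT is BIT_3, i.e. 0x08, per the register definitions later in this patch):

        #include <linux/io.h>
        #include <linux/delay.h>
        #include <linux/errno.h>

        #define RISC_INT        0x08    /* stand-in for ISR_RISC_INT */

        static int poll_risc_int(void __iomem *istatus, unsigned int spins)
        {
                while (spins--) {
                        if (readw(istatus) & RISC_INT)
                                return 0;
                        udelay(5);
                }
                return -ETIMEDOUT;
        }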
void
-qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla24xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla24xx_fw_dump *fw;
void *nxt;
void *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ lockdep_assert_held(&ha->hardware_lock);
+
if (IS_P3P_TYPE(ha))
return;
- flags = 0;
ha->fw_dump_cap_flags = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
-
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd006,
"No buffer available for dump.\n");
- goto qla24xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1129,13 +1102,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla24xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp24;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1145,41 +1118,41 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1218,19 +1191,19 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1339,44 +1312,31 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla24xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
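The 24xx-and-later dump routines read most register banks through a window: a bank base is written to iobase_addr, after which the bank's contents appear at the window registers and qla24xx_read_window() copies them out as big-endian dwords. A hedged sketch of that idiom — the offsets below are illustrative, not the real 24xx register map:

        #include <linux/io.h>
        #include <linux/types.h>
        #include <asm/byteorder.h>

        #define ADDR_REG        0xB8    /* hypothetical iobase_addr offset */
        #define WINDOW_REG      0xC0    /* hypothetical window base offset */

        static __be32 *read_window(void __iomem *base, u32 bank, u32 count,
                                   __be32 *buf)
        {
                u32 i;

                writel(bank, base + ADDR_REG);          /* select the bank */
                for (i = 0; i < count; i++)             /* copy out, stored BE */
                        *buf++ = cpu_to_be32(readl(base + WINDOW_REG + 4 * i));
                return buf;
        }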
void
-qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla25xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla25xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd008,
"No buffer available for dump.\n");
- goto qla25xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1384,14 +1344,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla25xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
ha->fw_dump->version = htonl(2);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1405,73 +1365,73 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1535,19 +1495,19 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1665,44 +1625,31 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla25xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla81xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla81xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00a,
"No buffer available for dump.\n");
- goto qla81xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1710,12 +1657,12 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla81xx_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp81;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1729,73 +1676,73 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1859,19 +1806,19 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1993,57 +1940,44 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla81xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla83xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla83xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00c,
"No buffer available for dump!!!\n");
- goto qla83xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xd00d,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
- goto qla83xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -2051,24 +1985,24 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
*/
qla24xx_pause_risc(reg, ha);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ wrt_reg_dword(&reg->iobase_addr, 0x6000);
dmp_reg = &reg->iobase_window;
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
dmp_reg = &reg->unused_4_1[0];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ wrt_reg_dword(&reg->iobase_addr, 0x6010);
dmp_reg = &reg->unused_4_1[2];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
/* select PCR and disable ecc checking and correction */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
/* Host/Risc registers. */
iter_reg = fw->host_risc_reg;
@@ -2077,73 +2011,73 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7040, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -2239,19 +2173,19 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -2457,16 +2391,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+ for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
udelay(5);
if (!cnt) {
@@ -2507,14 +2441,6 @@ copy_queue:
qla83xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla83xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
/****************************************************************************/
@@ -2735,7 +2661,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- uint16_t __iomem *mbx_reg;
+ __le16 __iomem *mbx_reg;
if (!ql_mask_match(level))
return;
@@ -2750,7 +2676,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
ql_dbg(level, vha, id, "Mailbox registers:\n");
for (i = 0; i < 6; i++, mbx_reg++)
ql_dbg(level, vha, id,
- "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
+ "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 433e95502808..54ed020e6f75 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -12,205 +12,205 @@
*/
struct qla2300_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t risc_host_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t resp_dma_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[64];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf800];
- uint16_t stack_ram[0x1000];
- uint16_t data_ram[1];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 risc_host_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 resp_dma_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[64];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf800];
+ __be16 stack_ram[0x1000];
+ __be16 data_ram[1];
};
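These dump structures hold data that is stored big-endian (every store above goes through htons()/htonl()), so retyping the fields from uint16_t/uint32_t to __be16/__be32 lets sparse (make C=1) flag any access that forgets the byte order. A toy example of what the annotation catches:

        #include <linux/types.h>
        #include <asm/byteorder.h>

        struct dump_hdr {
                __be16 hccr;            /* big-endian in the dump */
        };

        static void fill(struct dump_hdr *d, u16 raw)
        {
                d->hccr = cpu_to_be16(raw);     /* sparse-clean */
                /* d->hccr = raw; -- sparse: incorrect type in assignment */
        }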
struct qla2100_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[16];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf000];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[16];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf000];
};
struct qla24xx_fw_dump {
- uint32_t host_status;
- uint32_t host_reg[32];
- uint32_t shadow_reg[7];
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[16];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[16];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[112];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[176];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_reg[32];
+ __be32 shadow_reg[7];
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[16];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[16];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[112];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[176];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla25xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[192];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[192];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla81xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[224];
- uint32_t fb_hdw_reg[208];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[224];
+ __be32 fb_hdw_reg[208];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla83xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[48];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[256];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t xseq_2_reg[16];
- uint32_t rseq_gp_reg[256];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t rseq_3_reg[16];
- uint32_t aseq_gp_reg[256];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t aseq_3_reg[16];
- uint32_t cmd_dma_reg[64];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[256];
- uint32_t rq0_array_reg[256];
- uint32_t rq1_array_reg[256];
- uint32_t rp0_array_reg[256];
- uint32_t rp1_array_reg[256];
- uint32_t queue_control_reg[16];
- uint32_t fb_hdw_reg[432];
- uint32_t at0_array_reg[128];
- uint32_t code_ram[0x2400];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[48];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[256];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 xseq_2_reg[16];
+ __be32 rseq_gp_reg[256];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 rseq_3_reg[16];
+ __be32 aseq_gp_reg[256];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 aseq_3_reg[16];
+ __be32 cmd_dma_reg[64];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[256];
+ __be32 rq0_array_reg[256];
+ __be32 rq1_array_reg[256];
+ __be32 rp0_array_reg[256];
+ __be32 rp1_array_reg[256];
+ __be32 queue_control_reg[16];
+ __be32 fb_hdw_reg[432];
+ __be32 at0_array_reg[128];
+ __be32 code_ram[0x2400];
+ __be32 ext_mem[1];
};
#define EFT_NUM_BUFFERS 4
@@ -223,44 +223,45 @@ struct qla83xx_fw_dump {
#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
struct qla2xxx_fce_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- uint32_t addr_l;
- uint32_t addr_h;
- uint32_t eregs[8];
+ __be32 size;
+ __be32 addr_l;
+ __be32 addr_h;
+ __be32 eregs[8];
};
/* used by exchange off load and extended login offload */
struct qla2xxx_offld_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- u64 addr;
+ __be32 size;
+ __be32 reserved;
+ __be64 addr;
};
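Besides the endian retyping, qla2xxx_offld_chain gains an explicit reserved word before the 64-bit addr. A plausible reading (not stated in the patch) is that this names the alignment hole: 64-bit builds already padded addr to an 8-byte boundary, while i386 would have placed it at offset 12, so spelling the padding out pins a single layout everywhere. A compile-time check of the resulting layout, using a stand-in struct:

        #include <linux/build_bug.h>
        #include <linux/stddef.h>
        #include <linux/types.h>

        struct offld_chain {
                __be32 type;
                __be32 chain_size;
                __be32 size;
                __be32 reserved;        /* names the pre-addr padding */
                __be64 addr;
        };

        static_assert(offsetof(struct offld_chain, addr) == 16);
        static_assert(sizeof(struct offld_chain) == 24);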
struct qla2xxx_mq_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t count;
- uint32_t qregs[4 * QLA_MQ_SIZE];
+ __be32 count;
+ __be32 qregs[4 * QLA_MQ_SIZE];
};
struct qla2xxx_mqueue_header {
- uint32_t queue;
+ __be32 queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
#define TYPE_ATIO_QUEUE 0x3
- uint32_t number;
- uint32_t size;
+ __be32 number;
+ __be32 size;
};
struct qla2xxx_mqueue_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
};
#define DUMP_CHAIN_VARIANT 0x80000000
@@ -273,28 +274,28 @@ struct qla2xxx_mqueue_chain {
struct qla2xxx_fw_dump {
uint8_t signature[4];
- uint32_t version;
+ __be32 version;
- uint32_t fw_major_version;
- uint32_t fw_minor_version;
- uint32_t fw_subminor_version;
- uint32_t fw_attributes;
+ __be32 fw_major_version;
+ __be32 fw_minor_version;
+ __be32 fw_subminor_version;
+ __be32 fw_attributes;
- uint32_t vendor;
- uint32_t device;
- uint32_t subsystem_vendor;
- uint32_t subsystem_device;
+ __be32 vendor;
+ __be32 device;
+ __be32 subsystem_vendor;
+ __be32 subsystem_device;
- uint32_t fixed_size;
- uint32_t mem_size;
- uint32_t req_q_size;
- uint32_t rsp_q_size;
+ __be32 fixed_size;
+ __be32 mem_size;
+ __be32 req_q_size;
+ __be32 rsp_q_size;
- uint32_t eft_size;
- uint32_t eft_addr_l;
- uint32_t eft_addr_h;
+ __be32 eft_size;
+ __be32 eft_addr_l;
+ __be32 eft_addr_h;
- uint32_t header_size;
+ __be32 header_size;
union {
struct qla2100_fw_dump isp21;
@@ -369,7 +370,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
-extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *,
uint32_t, void **);
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 47c7a56438b5..42dbf90d4651 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -128,15 +128,50 @@ static inline uint32_t make_handle(uint16_t x, uint16_t y)
* I/O register
*/
-#define RD_REG_BYTE(addr) readb(addr)
-#define RD_REG_WORD(addr) readw(addr)
-#define RD_REG_DWORD(addr) readl(addr)
-#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr)
-#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr)
-#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr)
-#define WRT_REG_BYTE(addr, data) writeb(data, addr)
-#define WRT_REG_WORD(addr, data) writew(data, addr)
-#define WRT_REG_DWORD(addr, data) writel(data, addr)
+static inline u8 rd_reg_byte(const volatile u8 __iomem *addr)
+{
+ return readb(addr);
+}
+
+static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
+{
+ return readw(addr);
+}
+
+static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline u8 rd_reg_byte_relaxed(const volatile u8 __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+
+static inline u16 rd_reg_word_relaxed(const volatile __le16 __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+
+static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+static inline void wrt_reg_byte(volatile u8 __iomem *addr, u8 data)
+{
+ return writeb(data, addr);
+}
+
+static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
+{
+ return writew(data, addr);
+}
+
+static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
+{
+ return writel(data, addr);
+}
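Turning the RD_REG_*/WRT_REG_* macros into inline functions is not just cosmetic: the macros accepted any address, while the inlines give each accessor a typed, volatile-qualified parameter, so handing a dword register to a word accessor now draws an incompatible-pointer-type warning (and a sparse error) instead of silently reading half the register. A small fragment against the rd_reg_word() defined above:

        #include <linux/types.h>

        static u16 read_it(volatile __le16 __iomem *reg16,
                           volatile __le32 __iomem *reg32)
        {
                /* return rd_reg_word(reg32); -- warns: incompatible pointer type */
                return rd_reg_word(reg16);      /* correct accessor for the type */
        }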
/*
* ISP83XX specific remote register addresses
@@ -469,7 +504,7 @@ struct srb_iocb {
u32 rx_size;
dma_addr_t els_plogi_pyld_dma;
dma_addr_t els_resp_pyld_dma;
- uint32_t fw_status[3];
+ __le32 fw_status[3];
__le16 comp_status;
__le16 len;
} els_plogi;
@@ -520,8 +555,8 @@ struct srb_iocb {
#define MAX_IOCB_MB_REG 28
#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
- __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
+ u16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ u16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
struct completion comp;
@@ -532,7 +567,7 @@ struct srb_iocb {
} nack;
struct {
__le16 comp_status;
- uint16_t rsp_pyld_len;
+ __le16 rsp_pyld_len;
uint8_t aen_op;
void *desc;
@@ -663,23 +698,23 @@ struct msg_echo_lb {
* ISP I/O Register Set structure definitions.
*/
struct device_reg_2xxx {
- uint16_t flash_address; /* Flash BIOS address */
- uint16_t flash_data; /* Flash BIOS data */
- uint16_t unused_1[1]; /* Gap */
- uint16_t ctrl_status; /* Control/Status */
+ __le16 flash_address; /* Flash BIOS address */
+ __le16 flash_data; /* Flash BIOS data */
+ __le16 unused_1[1]; /* Gap */
+ __le16 ctrl_status; /* Control/Status */
#define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */
#define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */
#define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */
- uint16_t ictrl; /* Interrupt control */
+ __le16 ictrl; /* Interrupt control */
#define ICR_EN_INT BIT_15 /* ISP enable interrupts. */
#define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */
- uint16_t istatus; /* Interrupt status */
+ __le16 istatus; /* Interrupt status */
#define ISR_RISC_INT BIT_3 /* RISC interrupt */
- uint16_t semaphore; /* Semaphore */
- uint16_t nvram; /* NVRAM register. */
+ __le16 semaphore; /* Semaphore */
+ __le16 nvram; /* NVRAM register. */
#define NVR_DESELECT 0
#define NVR_BUSY BIT_15
#define NVR_WRT_ENABLE BIT_14 /* Write enable */
@@ -693,80 +728,80 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t unused_2[59]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 unused_2[59]; /* Gap */
} __attribute__((packed)) isp2100;
struct {
/* Request Queue */
- uint16_t req_q_in; /* In-Pointer */
- uint16_t req_q_out; /* Out-Pointer */
+ __le16 req_q_in; /* In-Pointer */
+ __le16 req_q_out; /* Out-Pointer */
/* Response Queue */
- uint16_t rsp_q_in; /* In-Pointer */
- uint16_t rsp_q_out; /* Out-Pointer */
+ __le16 rsp_q_in; /* In-Pointer */
+ __le16 rsp_q_out; /* Out-Pointer */
/* RISC to Host Status */
- uint32_t host_status;
+ __le32 host_status;
#define HSR_RISC_INT BIT_15 /* RISC interrupt */
#define HSR_RISC_PAUSED BIT_8 /* RISC Paused */
/* Host to Host Semaphore */
- uint16_t host_semaphore;
- uint16_t unused_3[17]; /* Gap */
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
- uint16_t fb_cmd;
- uint16_t unused_4[10]; /* Gap */
+ __le16 host_semaphore;
+ __le16 unused_3[17]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+ __le16 fb_cmd;
+ __le16 unused_4[10]; /* Gap */
} __attribute__((packed)) isp2300;
} u;
- uint16_t fpm_diag_config;
- uint16_t unused_5[0x4]; /* Gap */
- uint16_t risc_hw;
- uint16_t unused_5_1; /* Gap */
- uint16_t pcr; /* Processor Control Register. */
- uint16_t unused_6[0x5]; /* Gap */
- uint16_t mctr; /* Memory Configuration and Timing. */
- uint16_t unused_7[0x3]; /* Gap */
- uint16_t fb_cmd_2100; /* Unused on 23XX */
- uint16_t unused_8[0x3]; /* Gap */
- uint16_t hccr; /* Host command & control register. */
+ __le16 fpm_diag_config;
+ __le16 unused_5[0x4]; /* Gap */
+ __le16 risc_hw;
+ __le16 unused_5_1; /* Gap */
+ __le16 pcr; /* Processor Control Register. */
+ __le16 unused_6[0x5]; /* Gap */
+ __le16 mctr; /* Memory Configuration and Timing. */
+ __le16 unused_7[0x3]; /* Gap */
+ __le16 fb_cmd_2100; /* Unused on 23XX */
+ __le16 unused_8[0x3]; /* Gap */
+ __le16 hccr; /* Host command & control register. */
#define HCCR_HOST_INT BIT_7 /* Host interrupt bit */
#define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */
/* HCCR commands */
@@ -779,9 +814,9 @@ struct device_reg_2xxx {
#define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */
#define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */
- uint16_t unused_9[5]; /* Gap */
- uint16_t gpiod; /* GPIO Data register. */
- uint16_t gpioe; /* GPIO Enable register. */
+ __le16 unused_9[5]; /* Gap */
+ __le16 gpiod; /* GPIO Data register. */
+ __le16 gpioe; /* GPIO Enable register. */
#define GPIO_LED_MASK 0x00C0
#define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000
#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
@@ -793,95 +828,95 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t unused_10[8]; /* Gap */
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23; /* Also probe reg. */
+ __le16 unused_10[8]; /* Gap */
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23; /* Also probe reg. */
} __attribute__((packed)) isp2200;
} u_end;
};
struct device_reg_25xxmq {
- uint32_t req_q_in;
- uint32_t req_q_out;
- uint32_t rsp_q_in;
- uint32_t rsp_q_out;
- uint32_t atio_q_in;
- uint32_t atio_q_out;
+ __le32 req_q_in;
+ __le32 req_q_out;
+ __le32 rsp_q_in;
+ __le32 rsp_q_out;
+ __le32 atio_q_in;
+ __le32 atio_q_out;
};
struct device_reg_fx00 {
- uint32_t mailbox0; /* 00 */
- uint32_t mailbox1; /* 04 */
- uint32_t mailbox2; /* 08 */
- uint32_t mailbox3; /* 0C */
- uint32_t mailbox4; /* 10 */
- uint32_t mailbox5; /* 14 */
- uint32_t mailbox6; /* 18 */
- uint32_t mailbox7; /* 1C */
- uint32_t mailbox8; /* 20 */
- uint32_t mailbox9; /* 24 */
- uint32_t mailbox10; /* 28 */
- uint32_t mailbox11;
- uint32_t mailbox12;
- uint32_t mailbox13;
- uint32_t mailbox14;
- uint32_t mailbox15;
- uint32_t mailbox16;
- uint32_t mailbox17;
- uint32_t mailbox18;
- uint32_t mailbox19;
- uint32_t mailbox20;
- uint32_t mailbox21;
- uint32_t mailbox22;
- uint32_t mailbox23;
- uint32_t mailbox24;
- uint32_t mailbox25;
- uint32_t mailbox26;
- uint32_t mailbox27;
- uint32_t mailbox28;
- uint32_t mailbox29;
- uint32_t mailbox30;
- uint32_t mailbox31;
- uint32_t aenmailbox0;
- uint32_t aenmailbox1;
- uint32_t aenmailbox2;
- uint32_t aenmailbox3;
- uint32_t aenmailbox4;
- uint32_t aenmailbox5;
- uint32_t aenmailbox6;
- uint32_t aenmailbox7;
+ __le32 mailbox0; /* 00 */
+ __le32 mailbox1; /* 04 */
+ __le32 mailbox2; /* 08 */
+ __le32 mailbox3; /* 0C */
+ __le32 mailbox4; /* 10 */
+ __le32 mailbox5; /* 14 */
+ __le32 mailbox6; /* 18 */
+ __le32 mailbox7; /* 1C */
+ __le32 mailbox8; /* 20 */
+ __le32 mailbox9; /* 24 */
+ __le32 mailbox10; /* 28 */
+ __le32 mailbox11;
+ __le32 mailbox12;
+ __le32 mailbox13;
+ __le32 mailbox14;
+ __le32 mailbox15;
+ __le32 mailbox16;
+ __le32 mailbox17;
+ __le32 mailbox18;
+ __le32 mailbox19;
+ __le32 mailbox20;
+ __le32 mailbox21;
+ __le32 mailbox22;
+ __le32 mailbox23;
+ __le32 mailbox24;
+ __le32 mailbox25;
+ __le32 mailbox26;
+ __le32 mailbox27;
+ __le32 mailbox28;
+ __le32 mailbox29;
+ __le32 mailbox30;
+ __le32 mailbox31;
+ __le32 aenmailbox0;
+ __le32 aenmailbox1;
+ __le32 aenmailbox2;
+ __le32 aenmailbox3;
+ __le32 aenmailbox4;
+ __le32 aenmailbox5;
+ __le32 aenmailbox6;
+ __le32 aenmailbox7;
/* Request Queue. */
- uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
- uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
+ __le32 req_q_in; /* A0 - Request Queue In-Pointer */
+ __le32 req_q_out; /* A4 - Request Queue Out-Pointer */
/* Response Queue. */
- uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
- uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
+ __le32 rsp_q_in; /* A8 - Response Queue In-Pointer */
+ __le32 rsp_q_out; /* AC - Response Queue Out-Pointer */
/* Init values shadowed on FW Up Event */
- uint32_t initval0; /* B0 */
- uint32_t initval1; /* B4 */
- uint32_t initval2; /* B8 */
- uint32_t initval3; /* BC */
- uint32_t initval4; /* C0 */
- uint32_t initval5; /* C4 */
- uint32_t initval6; /* C8 */
- uint32_t initval7; /* CC */
- uint32_t fwheartbeat; /* D0 */
- uint32_t pseudoaen; /* D4 */
+ __le32 initval0; /* B0 */
+ __le32 initval1; /* B4 */
+ __le32 initval2; /* B8 */
+ __le32 initval3; /* BC */
+ __le32 initval4; /* C0 */
+ __le32 initval5; /* C4 */
+ __le32 initval6; /* C8 */
+ __le32 initval7; /* CC */
+ __le32 fwheartbeat; /* D0 */
+ __le32 pseudoaen; /* D4 */
};
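
With the whole ISPFX00 register file declared __le32, a raw pointer dereference no longer passes sparse; reads must go through the accessor, which returns CPU-order data on either host endianness. Illustrative sketch only (rd_reg_dword is assumed to mirror wrt_reg_dword above):

	/* Sketch: polling the ISPFX00 firmware heartbeat register. */
	static u32 example_read_heartbeat(struct device_reg_fx00 __iomem *reg)
	{
		return rd_reg_dword(&reg->fwheartbeat);
	}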
@@ -921,18 +956,18 @@ typedef union {
&(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \
&(reg)->u.isp2300.mailbox0 + (num))
#define RD_MAILBOX_REG(ha, reg, num) \
- RD_REG_WORD(MAILBOX_REG(ha, reg, num))
+ rd_reg_word(MAILBOX_REG(ha, reg, num))
#define WRT_MAILBOX_REG(ha, reg, num, data) \
- WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data)
+ wrt_reg_word(MAILBOX_REG(ha, reg, num), data)
#define FB_CMD_REG(ha, reg) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
&(reg)->fb_cmd_2100 : \
&(reg)->u.isp2300.fb_cmd)
#define RD_FB_CMD_REG(ha, reg) \
- RD_REG_WORD(FB_CMD_REG(ha, reg))
+ rd_reg_word(FB_CMD_REG(ha, reg))
#define WRT_FB_CMD_REG(ha, reg, data) \
- WRT_REG_WORD(FB_CMD_REG(ha, reg), data)
+ wrt_reg_word(FB_CMD_REG(ha, reg), data)
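
The indexed mailbox macros keep their old call sites but now expand to the typed accessors, so every mailbox access is endianness-checked. A sketch of a call, with ha and reg assumed in scope:

	/* Sketch: read mailbox 2 through the indexed accessor macro. */
	u16 mb2 = RD_MAILBOX_REG(ha, reg, 2);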
typedef struct {
uint32_t out_mb; /* outbound from driver */
@@ -1316,7 +1351,7 @@ typedef struct {
uint8_t port_id[4];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
- uint16_t execution_throttle;
+ __le16 execution_throttle;
uint16_t execution_count;
uint8_t reset_count;
uint8_t reserved_2;
@@ -1402,9 +1437,9 @@ typedef struct {
*/
uint8_t firmware_options[2];
- uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 frame_payload_size;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1413,17 +1448,17 @@ typedef struct {
uint8_t login_timeout;
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t request_q_outpointer;
- uint16_t response_q_inpointer;
- uint16_t request_q_length;
- uint16_t response_q_length;
- __le64 request_q_address __packed;
- __le64 response_q_address __packed;
+ __le16 request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_length;
+ __le16 response_q_length;
+ __le64 request_q_address __packed;
+ __le64 response_q_address __packed;
- uint16_t lun_enables;
+ __le16 lun_enables;
uint8_t command_resource_count;
uint8_t immediate_notify_resource_count;
- uint16_t timeout;
+ __le16 timeout;
uint8_t reserved_2[2];
/*
@@ -1571,8 +1606,8 @@ typedef struct {
uint8_t firmware_options[2];
uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1696,7 +1731,7 @@ typedef struct {
uint8_t reset_delay;
uint8_t port_down_retry_count;
uint8_t boot_id_number;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint8_t fcode_boot_port_name[WWN_SIZE];
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
@@ -1802,7 +1837,7 @@ struct atio {
};
typedef union {
- uint16_t extended;
+ __le16 extended;
struct {
uint8_t reserved;
uint8_t standard;
@@ -1828,18 +1863,18 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
#define CF_WRITE BIT_6
#define CF_READ BIT_5
#define CF_SIMPLE_TAG BIT_3
#define CF_ORDERED_TAG BIT_2
#define CF_HEAD_TAG BIT_1
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
union {
struct dsd32 dsd32[3];
struct dsd64 dsd64[2];
@@ -1857,11 +1892,11 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t byte_count; /* Total byte count. */
struct dsd64 dsd[2];
@@ -1923,7 +1958,7 @@ struct crc_context {
__le16 guard_seed; /* Initial Guard Seed */
__le16 prot_opts; /* Requested Data Protection Mode */
__le16 blk_size; /* Data size in bytes */
- uint16_t runt_blk_guard; /* Guard value for runt block (tape
+ __le16 runt_blk_guard; /* Guard value for runt block (tape
* only) */
__le32 byte_count; /* Total byte count/ total data
* transfer count */
@@ -1976,13 +2011,13 @@ typedef struct {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t scsi_status; /* SCSI status. */
- uint16_t comp_status; /* Completion status. */
- uint16_t state_flags; /* State flags. */
- uint16_t status_flags; /* Status flags. */
- uint16_t rsp_info_len; /* Response Info Length. */
- uint16_t req_sense_length; /* Request sense data length. */
- uint32_t residual_length; /* Residual transfer length. */
+ __le16 scsi_status; /* SCSI status. */
+ __le16 comp_status; /* Completion status. */
+ __le16 state_flags; /* State flags. */
+ __le16 status_flags; /* Status flags. */
+ __le16 rsp_info_len; /* Response Info Length. */
+ __le16 req_sense_length; /* Request sense data length. */
+ __le32 residual_length; /* Residual transfer length. */
uint8_t rsp_info[8]; /* FCP response information. */
uint8_t req_sense_data[32]; /* Request sense data. */
} sts_entry_t;
@@ -2114,8 +2149,8 @@ typedef struct {
/* clear port changed, */
/* use sequence number. */
uint8_t reserved_1;
- uint16_t sequence_number; /* Sequence number of event */
- uint16_t lun; /* SCSI LUN */
+ __le16 sequence_number; /* Sequence number of event */
+ __le16 lun; /* SCSI LUN */
uint8_t reserved_2[48];
} mrk_entry_t;
@@ -2130,19 +2165,19 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle1; /* System handle. */
target_id_t loop_id;
- uint16_t status;
- uint16_t control_flags; /* Control flags. */
+ __le16 status;
+ __le16 control_flags; /* Control flags. */
uint16_t reserved2;
- uint16_t timeout;
- uint16_t cmd_dsd_count;
- uint16_t total_dsd_count;
+ __le16 timeout;
+ __le16 cmd_dsd_count;
+ __le16 total_dsd_count;
uint8_t type;
uint8_t r_ctl;
- uint16_t rx_id;
+ __le16 rx_id;
uint16_t reserved3;
uint32_t handle2;
- uint32_t rsp_bytecount;
- uint32_t req_bytecount;
+ __le32 rsp_bytecount;
+ __le32 req_bytecount;
struct dsd64 req_dsd;
struct dsd64 rsp_dsd;
} ms_iocb_entry_t;
@@ -2170,20 +2205,20 @@ struct mbx_entry {
uint32_t handle;
target_id_t loop_id;
- uint16_t status;
- uint16_t state_flags;
- uint16_t status_flags;
+ __le16 status;
+ __le16 state_flags;
+ __le16 status_flags;
uint32_t sys_define2[2];
- uint16_t mb0;
- uint16_t mb1;
- uint16_t mb2;
- uint16_t mb3;
- uint16_t mb6;
- uint16_t mb7;
- uint16_t mb9;
- uint16_t mb10;
+ __le16 mb0;
+ __le16 mb1;
+ __le16 mb2;
+ __le16 mb3;
+ __le16 mb6;
+ __le16 mb7;
+ __le16 mb9;
+ __le16 mb10;
uint32_t reserved_2[2];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -2205,52 +2240,52 @@ struct imm_ntfy_from_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t lun;
+ __le16 lun;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t status_modifier;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
+ __le16 status_modifier;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
#define SRR_IU_DATA_IN 0x1
#define SRR_IU_DATA_OUT 0x5
#define SRR_IU_STATUS 0x7
- uint16_t srr_ox_id;
+ __le16 srr_ox_id;
uint8_t reserved_2[28];
} isp2x;
struct {
uint32_t reserved;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_2;
- uint16_t flags;
+ __le16 flags;
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_ox_id;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_ox_id;
union {
struct {
uint8_t node_name[8];
} plogi; /* PLOGI/ADISC/PDISC */
struct {
/* PRLI word 3 bit 0-15 */
- uint16_t wd3_lo;
+ __le16 wd3_lo;
uint8_t resv0[6];
} prli;
struct {
uint8_t port_id[3];
uint8_t resv1;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t resv2;
} req_els;
} u;
@@ -2263,7 +2298,7 @@ struct imm_ntfy_from_isp {
} isp24;
} u;
uint16_t reserved_7;
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
#endif
@@ -2653,8 +2688,8 @@ static const char * const port_dstate_str[] = {
#define FDMI_HBA_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
- uint16_t type;
- uint16_t len;
+ __be16 type;
+ __be16 len;
union {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[64];
@@ -2666,11 +2701,11 @@ struct ct_fdmi_hba_attr {
uint8_t orom_version[16];
uint8_t fw_version[32];
uint8_t os_version[128];
- uint32_t max_ct_len;
+ __be32 max_ct_len;
uint8_t sym_name[256];
- uint32_t vendor_specific_info;
- uint32_t num_ports;
+ __be32 vendor_specific_info;
+ __be32 num_ports;
uint8_t fabric_name[WWN_SIZE];
uint8_t bios_name[32];
uint8_t vendor_identifier[8];
@@ -2678,12 +2713,12 @@ struct ct_fdmi_hba_attr {
};
struct ct_fdmi1_hba_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT];
};
struct ct_fdmi2_hba_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT];
};
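
FDMI/CT payloads travel big-endian on the wire, which is why these counts and attributes become __be16/__be32: values must be built with cpu_to_be*() and parsed with be*_to_cpu(). A hedged sketch of filling one attribute (the FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH name is assumed from the surrounding FDMI_HBA_* defines):

	/* Sketch: populate an FDMI HBA attribute in wire (big-endian) order. */
	static void example_fill_max_ct(struct ct_fdmi_hba_attr *attr, u32 len)
	{
		attr->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
		attr->len = cpu_to_be16(4 + sizeof(attr->a.max_ct_len));
		attr->a.max_ct_len = cpu_to_be32(len);
	}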
@@ -2735,44 +2770,44 @@ struct ct_fdmi2_hba_attributes {
#define FC_CLASS_2_3 0x0C
struct ct_fdmi_port_attr {
- uint16_t type;
- uint16_t len;
+ __be16 type;
+ __be16 len;
union {
uint8_t fc4_types[32];
- uint32_t sup_speed;
- uint32_t cur_speed;
- uint32_t max_frame_size;
+ __be32 sup_speed;
+ __be32 cur_speed;
+ __be32 max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[256];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
- uint32_t port_type;
- uint32_t port_supported_cos;
+ __be32 port_type;
+ __be32 port_supported_cos;
uint8_t fabric_name[WWN_SIZE];
uint8_t port_fc4_type[32];
- uint32_t port_state;
- uint32_t num_ports;
- uint32_t port_id;
+ __be32 port_state;
+ __be32 num_ports;
+ __be32 port_id;
uint8_t smartsan_service[24];
uint8_t smartsan_guid[16];
uint8_t smartsan_version[24];
uint8_t smartsan_prod_name[16];
- uint32_t smartsan_port_info;
- uint32_t smartsan_qos_support;
- uint32_t smartsan_security_support;
+ __be32 smartsan_port_info;
+ __be32 smartsan_qos_support;
+ __be32 smartsan_security_support;
} a;
};
struct ct_fdmi1_port_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT];
};
struct ct_fdmi2_port_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT];
};
@@ -2826,8 +2861,8 @@ struct ct_cmd_hdr {
/* CT command request */
struct ct_sns_req {
struct ct_cmd_hdr header;
- uint16_t command;
- uint16_t max_rsp_size;
+ __be16 command;
+ __be16 max_rsp_size;
uint8_t fragment_id;
uint8_t reserved[3];
@@ -2884,7 +2919,7 @@ struct ct_sns_req {
struct {
uint8_t hba_identifier[8];
- uint32_t entry_count;
+ __be32 entry_count;
uint8_t port_name[8];
struct ct_fdmi2_hba_attributes attrs;
} rhba;
@@ -2939,7 +2974,7 @@ struct ct_sns_req {
/* CT command response header */
struct ct_rsp_hdr {
struct ct_cmd_hdr header;
- uint16_t response;
+ __be16 response;
uint16_t residual;
uint8_t fragment_id;
uint8_t reason_code;
@@ -3025,8 +3060,8 @@ struct ct_sns_rsp {
} gfpn_id;
struct {
- uint16_t speeds;
- uint16_t speed;
+ __be16 speeds;
+ __be16 speed;
} gpsc;
#define GFF_FCP_SCSI_OFFSET 7
@@ -3116,13 +3151,13 @@ struct fab_scan {
struct sns_cmd_pkt {
union {
struct {
- uint16_t buffer_length;
- uint16_t reserved_1;
- __le64 buffer_address __packed;
- uint16_t subcommand_length;
- uint16_t reserved_2;
- uint16_t subcommand;
- uint16_t size;
+ __le16 buffer_length;
+ __le16 reserved_1;
+ __le64 buffer_address __packed;
+ __le16 subcommand_length;
+ __le16 reserved_2;
+ __le16 subcommand;
+ __le16 size;
uint32_t reserved_3;
uint8_t param[36];
} cmd;
@@ -3148,7 +3183,7 @@ struct gid_list_info {
uint8_t area;
uint8_t domain;
uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */
- uint16_t loop_id; /* ISP23XX -- 6 bytes. */
+ __le16 loop_id; /* ISP23XX -- 6 bytes. */
uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
};
@@ -3222,7 +3257,8 @@ struct isp_operations {
int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t,
uint32_t);
- void (*fw_dump) (struct scsi_qla_host *, int);
+ void (*fw_dump)(struct scsi_qla_host *vha);
+ void (*mpi_fw_dump)(struct scsi_qla_host *, int);
int (*beacon_on) (struct scsi_qla_host *);
int (*beacon_off) (struct scsi_qla_host *);
@@ -3456,8 +3492,8 @@ struct rsp_que {
dma_addr_t dma;
response_t *ring;
response_t *ring_ptr;
- uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */
- uint32_t __iomem *rsp_q_out;
+ __le32 __iomem *rsp_q_in; /* FWI2-capable only. */
+ __le32 __iomem *rsp_q_out;
uint16_t ring_index;
uint16_t out_ptr;
uint16_t *in_ptr; /* queue shadow in index */
@@ -3483,8 +3519,8 @@ struct req_que {
dma_addr_t dma;
request_t *ring;
request_t *ring_ptr;
- uint32_t __iomem *req_q_in; /* FWI2-capable only. */
- uint32_t __iomem *req_q_out;
+ __le32 __iomem *req_q_in; /* FWI2-capable only. */
+ __le32 __iomem *req_q_out;
uint16_t ring_index;
uint16_t in_ptr;
uint16_t *out_ptr; /* queue shadow out index */
@@ -3552,7 +3588,7 @@ struct qla_qpair {
struct list_head hints_list;
uint16_t cpuid;
uint16_t retry_term_cnt;
- uint32_t retry_term_exchg_addr;
+ __le32 retry_term_exchg_addr;
uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
};
@@ -3579,98 +3615,98 @@ struct rdp_req_payload {
struct rdp_rsp_payload {
struct {
- uint32_t cmd;
- uint32_t len;
+ __be32 cmd;
+ __be32 len;
} hdr;
/* LS Request Info descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t req_payload_word_0;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
} ls_req_info_desc;
/* LS Request Info descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t req_payload_word_0;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
} ls_req_info_desc2;
/* SFP diagnostic param descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t temperature;
- uint16_t vcc;
- uint16_t tx_bias;
- uint16_t tx_power;
- uint16_t rx_power;
- uint16_t sfp_flags;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 temperature;
+ __be16 vcc;
+ __be16 tx_bias;
+ __be16 tx_power;
+ __be16 rx_power;
+ __be16 sfp_flags;
} sfp_diag_desc;
/* Port Speed Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t speed_capab;
- uint16_t operating_speed;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 speed_capab;
+ __be16 operating_speed;
} port_speed_desc;
/* Link Error Status Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t link_fail_cnt;
- uint32_t loss_sync_cnt;
- uint32_t loss_sig_cnt;
- uint32_t prim_seq_err_cnt;
- uint32_t inval_xmit_word_cnt;
- uint32_t inval_crc_cnt;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 link_fail_cnt;
+ __be32 loss_sync_cnt;
+ __be32 loss_sig_cnt;
+ __be32 prim_seq_err_cnt;
+ __be32 inval_xmit_word_cnt;
+ __be32 inval_crc_cnt;
uint8_t pn_port_phy_type;
uint8_t reserved[3];
} ls_err_desc;
/* Port name description with diag param */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t WWNN[WWN_SIZE];
uint8_t WWPN[WWN_SIZE];
} port_name_diag_desc;
/* Port Name desc for Direct attached Fx_Port or Nx_Port */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t WWNN[WWN_SIZE];
uint8_t WWPN[WWN_SIZE];
} port_name_direct_desc;
/* Buffer Credit descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t fcport_b2b;
- uint32_t attached_fcport_b2b;
- uint32_t fcport_rtt;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 fcport_b2b;
+ __be32 attached_fcport_b2b;
+ __be32 fcport_rtt;
} buffer_credit_desc;
/* Optical Element Data Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t high_alarm;
- uint16_t low_alarm;
- uint16_t high_warn;
- uint16_t low_warn;
- uint32_t element_flags;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 high_alarm;
+ __be16 low_alarm;
+ __be16 high_warn;
+ __be16 low_warn;
+ __be32 element_flags;
} optical_elmt_desc[5];
/* Optical Product Data Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t vendor_name[16];
uint8_t part_number[16];
uint8_t serial_number[16];
@@ -3708,17 +3744,17 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
- uint32_t __iomem *atio_q_in;
- uint32_t __iomem *atio_q_out;
+ __le32 __iomem *atio_q_in;
+ __le32 __iomem *atio_q_out;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
- uint16_t saved_exchange_count;
- uint32_t saved_firmware_options_1;
- uint32_t saved_firmware_options_2;
- uint32_t saved_firmware_options_3;
+ __le16 saved_exchange_count;
+ __le32 saved_firmware_options_1;
+ __le32 saved_firmware_options_2;
+ __le32 saved_firmware_options_3;
uint8_t saved_firmware_options[2];
uint8_t saved_add_firmware_options[2];
@@ -3748,6 +3784,11 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+struct qla_hw_data_stat {
+ u32 num_fw_dump;
+ u32 num_mpi_reset;
+};
+
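
The new stat block gives the driver one place to count firmware dumps and MPI resets for later reporting. A plausible increment site, sketched rather than quoted from the patch (ha is the qla_hw_data instance):

	/* Sketch: account for a captured firmware dump. */
	ha->fw_dumped = true;
	ha->stat.num_fw_dump++;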
/*
* Qlogic host adapter specific data structure.
*/
@@ -4212,7 +4253,7 @@ struct qla_hw_data {
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
- uint16_t fw_seriallink_options24[4];
+ __le16 fw_seriallink_options24[4];
uint8_t serdes_version[3];
uint8_t mpi_version[3];
@@ -4230,7 +4271,6 @@ struct qla_hw_data {
uint32_t fw_dump_len;
u32 fw_dump_alloc_len;
bool fw_dumped;
- bool fw_dump_mpi;
unsigned long fw_dump_cap_flags;
#define RISC_PAUSE_CMPL 0
#define DMA_SHUTDOWN_CMPL 1
@@ -4241,6 +4281,10 @@ struct qla_hw_data {
#define ISP_MBX_RDY 6
#define ISP_SOFT_RESET_CMPL 7
int fw_dump_reading;
+ void *mpi_fw_dump;
+ u32 mpi_fw_dump_len;
+ unsigned int mpi_fw_dump_reading:1;
+ unsigned int mpi_fw_dumped:1;
int prev_minidump_failed;
dma_addr_t eft_dma;
void *eft;
@@ -4392,7 +4436,7 @@ struct qla_hw_data {
#define NUM_DSD_CHAIN 4096
uint8_t fw_type;
- __le32 file_prd_off; /* File firmware product offset */
+ uint32_t file_prd_off; /* File firmware product offset */
uint32_t md_template_size;
void *md_tmplt_hdr;
@@ -4454,6 +4498,8 @@ struct qla_hw_data {
uint16_t last_zio_threshold;
#define DEFAULT_ZIO_THRESHOLD 5
+
+ struct qla_hw_data_stat stat;
};
struct active_regions {
@@ -4698,13 +4744,13 @@ typedef struct scsi_qla_host {
struct qla27xx_image_status {
uint8_t image_status_mask;
- uint16_t generation;
+ __le16 generation;
uint8_t ver_major;
uint8_t ver_minor;
uint8_t bitmap; /* 28xx only */
uint8_t reserved[2];
- uint32_t checksum;
- uint32_t signature;
+ __le32 checksum;
+ __le32 signature;
} __packed;
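
With generation/checksum/signature annotated __le16/__le32, any arithmetic over the image-status words needs an explicit conversion. A sketch of the checksum walk this enables (summing the block as little-endian dwords is an assumption, not quoted from the driver):

	/* Sketch: sum an image-status block as CPU-order dwords. */
	static u32 example_sum_dwords(const __le32 *dwords, size_t count)
	{
		u32 sum = 0;

		while (count--)
			sum += le32_to_cpu(*dwords++);
		return sum;
	}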
/* 28xx aux image status bitmap values */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index f9bad5bd7198..d1e12a29c3f7 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -134,28 +134,28 @@ struct vp_database_24xx {
struct nvram_24xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
+ __le16 nvram_version;
uint16_t reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- __le16 frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t hard_address;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t link_down_on_nos;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 link_down_on_nos;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
/* Offset 56. */
@@ -178,7 +178,7 @@ struct nvram_24xx {
* BIT 11-13 = Output Emphasis 4G
* BIT 14-15 = Reserved
*/
- uint16_t seriallink_options[4];
+ __le16 seriallink_options[4];
uint16_t reserved_2[16];
@@ -218,25 +218,25 @@ struct nvram_24xx {
*
* BIT 16-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
+ __le16 boot_lun_number;
uint16_t reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
+ __le16 alt1_boot_lun_number;
uint16_t reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
+ __le16 alt2_boot_lun_number;
uint16_t reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
+ __le16 alt3_boot_lun_number;
uint16_t reserved_11;
/*
@@ -249,23 +249,23 @@ struct nvram_24xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
uint16_t reserved_13;
- uint16_t boot_id_number;
+ __le16 boot_id_number;
uint16_t reserved_14;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint16_t reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
uint16_t reserved_16[3];
@@ -275,13 +275,13 @@ struct nvram_24xx {
uint8_t prev_drv_ver_minor;
uint8_t prev_drv_ver_subminor;
- uint16_t prev_bios_ver_major;
- uint16_t prev_bios_ver_minor;
+ __le16 prev_bios_ver_major;
+ __le16 prev_bios_ver_minor;
- uint16_t prev_efi_ver_major;
- uint16_t prev_efi_ver_minor;
+ __le16 prev_efi_ver_major;
+ __le16 prev_efi_ver_minor;
- uint16_t prev_fw_ver_major;
+ __le16 prev_fw_ver_major;
uint8_t prev_fw_ver_minor;
uint8_t prev_fw_ver_subminor;
@@ -309,7 +309,7 @@ struct nvram_24xx {
uint16_t subsystem_vendor_id;
uint16_t subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
/*
@@ -318,46 +318,46 @@ struct nvram_24xx {
*/
#define ICB_VERSION 1
struct init_cb_24xx {
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t hard_address;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t link_down_on_nos; /* Milliseconds. */
+ __le16 link_down_on_nos; /* Milliseconds. */
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
__le64 prio_request_q_address __packed;
- uint16_t msix;
- uint16_t msix_atio;
+ __le16 msix;
+ __le16 msix_atio;
uint8_t reserved_2[4];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
- __le64 atio_q_address __packed;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
+ __le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0 = Enable Hard Loop Id
@@ -378,7 +378,7 @@ struct init_cb_24xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -399,7 +399,7 @@ struct init_cb_24xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0 = Reserved
@@ -425,9 +425,9 @@ struct init_cb_24xx {
* BIT 30 = Enable request queue 0 out index shadowing
* BIT 31 = Reserved
*/
- uint32_t firmware_options_3;
- uint16_t qos;
- uint16_t rid;
+ __le32 firmware_options_3;
+ __le16 qos;
+ __le16 rid;
uint8_t reserved_3[20];
};
@@ -443,27 +443,27 @@ struct cmd_bidir {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT hanlde. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Commnad timeout. */
+ __le16 timeout; /* Command timeout. */
- uint16_t wr_dseg_count; /* Write Data segment count. */
- uint16_t rd_dseg_count; /* Read Data segment count. */
+ __le16 wr_dseg_count; /* Write Data segment count. */
+ __le16 rd_dseg_count; /* Read Data segment count. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define BD_WRAP_BACK BIT_3
#define BD_READ_DATA BIT_1
#define BD_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */
uint16_t reserved[2]; /* Reserved */
- uint32_t rd_byte_count; /* Total Byte count Read. */
- uint32_t wr_byte_count; /* Total Byte count write. */
+ __le32 rd_byte_count; /* Total Byte count Read. */
+ __le32 wr_byte_count; /* Total Byte count write. */
uint8_t port_id[3]; /* PortID of destination port.*/
uint8_t vp_index;
@@ -480,28 +480,28 @@ struct cmd_type_6 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
/* Data segment address. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -518,16 +518,16 @@ struct cmd_type_7 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
#define FW_MAX_TIMEOUT 0x1999
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint16_t reserved_1;
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t task_mgmt_flags; /* Task management flags. */
+ __le16 task_mgmt_flags; /* Task management flags. */
#define TMF_CLEAR_ACA BIT_14
#define TMF_TARGET_RESET BIT_13
#define TMF_LUN_RESET BIT_12
@@ -547,7 +547,7 @@ struct cmd_type_7 {
uint8_t crn;
uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -565,29 +565,29 @@ struct cmd_type_crc_2 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
+ __le16 crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
};
@@ -604,32 +604,32 @@ struct sts_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t ox_id; /* OX_ID used by the firmware. */
+ __le16 comp_status; /* Completion status. */
+ __le16 ox_id; /* OX_ID used by the firmware. */
- uint32_t residual_len; /* FW calc residual transfer length. */
+ __le32 residual_len; /* FW calc residual transfer length. */
union {
uint16_t reserved_1;
- uint16_t nvme_rsp_pyld_len;
+ __le16 nvme_rsp_pyld_len;
};
- uint16_t state_flags; /* State flags. */
+ __le16 state_flags; /* State flags. */
#define SF_TRANSFERRED_DATA BIT_11
#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
- uint16_t retry_delay;
- uint16_t scsi_status; /* SCSI status. */
+ __le16 retry_delay;
+ __le16 scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
- uint32_t rsp_residual_count; /* FCP RSP residual count. */
+ __le32 rsp_residual_count; /* FCP RSP residual count. */
- uint32_t sense_len; /* FCP SENSE length. */
+ __le32 sense_len; /* FCP SENSE length. */
union {
struct {
- uint32_t rsp_data_len; /* FCP response data length */
+ __le32 rsp_data_len; /* FCP response data length */
uint8_t data[28]; /* FCP rsp/sense information */
};
struct nvme_fc_ersp_iu nvme_ersp;
@@ -672,7 +672,7 @@ struct mrk_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint8_t modifier; /* Modifier (7-0). */
#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
@@ -701,24 +701,24 @@ struct ct_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t cmd_dsd_count;
+ __le16 cmd_dsd_count;
uint8_t vp_index;
uint8_t reserved_1;
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
uint16_t reserved_2;
- uint16_t rsp_dsd_count;
+ __le16 rsp_dsd_count;
uint8_t reserved_3[10];
- uint32_t rsp_byte_count;
- uint32_t cmd_byte_count;
+ __le32 rsp_byte_count;
+ __le32 cmd_byte_count;
struct dsd64 dsd[2];
};
@@ -733,17 +733,17 @@ struct purex_entry_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint16_t reserved1;
+ __le16 reserved1;
uint8_t vp_idx;
uint8_t reserved2;
- uint16_t status_flags;
- uint16_t nport_handle;
+ __le16 status_flags;
+ __le16 nport_handle;
- uint16_t frame_size;
- uint16_t trunc_frame_size;
+ __le16 frame_size;
+ __le16 trunc_frame_size;
- uint32_t rx_xchg_addr;
+ __le32 rx_xchg_addr;
uint8_t d_id[3];
uint8_t r_ctl;
@@ -754,13 +754,13 @@ struct purex_entry_24xx {
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t param;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
uint8_t els_frame_payload[20];
};
@@ -777,18 +777,18 @@ struct els_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* response only */
- uint16_t nport_handle;
+ __le16 comp_status; /* response only */
+ __le16 nport_handle;
- uint16_t tx_dsd_count;
+ __le16 tx_dsd_count;
uint8_t vp_index;
uint8_t sof_type;
#define EST_SOFI3 (1 << 4)
#define EST_SOFI2 (3 << 4)
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t rx_dsd_count;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 rx_dsd_count;
uint8_t opcode;
uint8_t reserved_2;
@@ -796,7 +796,7 @@ struct els_entry_24xx {
uint8_t d_id[3];
uint8_t s_id[3];
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
@@ -817,10 +817,10 @@ struct els_entry_24xx {
__le32 rx_len; /* DSD 1 length. */
};
struct {
- uint32_t total_byte_count;
- uint32_t error_subcode_1;
- uint32_t error_subcode_2;
- uint32_t error_subcode_3;
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
};
};
};
@@ -831,19 +831,19 @@ struct els_sts_entry_24xx {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t handle; /* System handle. */
+ __le32 handle; /* System handle. */
- uint16_t comp_status;
+ __le16 comp_status;
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t reserved_1;
+ __le16 reserved_1;
uint8_t vp_index;
uint8_t sof_type;
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t reserved_2;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 reserved_2;
uint8_t opcode;
uint8_t reserved_3;
@@ -851,13 +851,13 @@ struct els_sts_entry_24xx {
uint8_t d_id[3];
uint8_t s_id[3];
- uint16_t control_flags; /* Control flags. */
- uint32_t total_byte_count;
- uint32_t error_subcode_1;
- uint32_t error_subcode_2;
- uint32_t error_subcode_3;
+ __le16 control_flags; /* Control flags. */
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
- uint32_t reserved_4[4];
+ __le32 reserved_4[4];
};
/*
* ISP queue - Mailbox Command entry structure definition.
@@ -884,12 +884,12 @@ struct logio_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_LOGIO_ERROR 0x31 /* Login/Logout IOCB error. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
/* Modifiers. */
#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
@@ -918,7 +918,7 @@ struct logio_entry_24xx {
uint8_t rsp_size; /* Response size in 32bit words. */
- uint32_t io_parameter[11]; /* General I/O parameters. */
+ __le32 io_parameter[11]; /* General I/O parameters. */
#define LSC_SCODE_NOLINK 0x01
#define LSC_SCODE_NOIOCB 0x02
#define LSC_SCODE_NOXCB 0x03
@@ -946,17 +946,17 @@ struct tsk_mgmt_entry {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint16_t reserved_1;
- uint16_t delay; /* Activity delay in seconds. */
+ __le16 delay; /* Activity delay in seconds. */
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint32_t control_flags; /* Control Flags. */
+ __le32 control_flags; /* Control Flags. */
#define TCF_NOTMCMD_TO_TARGET BIT_31
#define TCF_LUN_RESET BIT_4
#define TCF_ABORT_TASK_SET BIT_3
@@ -981,15 +981,15 @@ struct abort_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
/* or Completion status. */
- uint16_t options; /* Options. */
+ __le16 options; /* Options. */
#define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */
uint32_t handle_to_abort; /* System handle to abort. */
- uint16_t req_que_no;
+ __le16 req_que_no;
uint8_t reserved_1[30];
uint8_t port_id[3]; /* PortID of destination port. */
@@ -1006,16 +1006,16 @@ struct abts_entry_24xx {
uint8_t handle_count;
uint8_t entry_status;
- uint32_t handle; /* type 0x55 only */
+ __le32 handle; /* type 0x55 only */
- uint16_t comp_status; /* type 0x55 only */
- uint16_t nport_handle; /* type 0x54 only */
+ __le16 comp_status; /* type 0x55 only */
+ __le16 nport_handle; /* type 0x54 only */
- uint16_t control_flags; /* type 0x55 only */
+ __le16 control_flags; /* type 0x55 only */
uint8_t vp_idx;
uint8_t sof_type; /* sof_type is upper nibble */
- uint32_t rx_xch_addr;
+ __le32 rx_xch_addr;
uint8_t d_id[3];
uint8_t r_ctl;
@@ -1026,30 +1026,30 @@ struct abts_entry_24xx {
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
+ __le16 rx_id;
+ __le16 ox_id;
- uint32_t param;
+ __le32 param;
union {
struct {
- uint32_t subcode3;
- uint32_t rsvd;
- uint32_t subcode1;
- uint32_t subcode2;
+ __le32 subcode3;
+ __le32 rsvd;
+ __le32 subcode1;
+ __le32 subcode2;
} error;
struct {
- uint16_t rsrvd1;
+ __le16 rsrvd1;
uint8_t last_seq_id;
uint8_t seq_id_valid;
- uint16_t aborted_rx_id;
- uint16_t aborted_ox_id;
- uint16_t high_seq_cnt;
- uint16_t low_seq_cnt;
+ __le16 aborted_rx_id;
+ __le16 aborted_ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
} ba_acc;
struct {
uint8_t vendor_unique;
@@ -1058,7 +1058,7 @@ struct abts_entry_24xx {
} ba_rjt;
} payload;
- uint32_t rx_xch_addr_to_abort;
+ __le32 rx_xch_addr_to_abort;
} __packed;
/* ABTS payload explanation values */
@@ -1087,7 +1087,7 @@ struct abts_entry_24xx {
* ISP I/O Register Set structure definitions.
*/
struct device_reg_24xx {
- uint32_t flash_addr; /* Flash/NVRAM BIOS address. */
+ __le32 flash_addr; /* Flash/NVRAM BIOS address. */
#define FARX_DATA_FLAG BIT_31
#define FARX_ACCESS_FLASH_CONF 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA 0x7FF00000
@@ -1138,9 +1138,9 @@ struct device_reg_24xx {
#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
#define HW_EVENT_FLASH_FW_ERR 0xF024
- uint32_t flash_data; /* Flash/NVRAM BIOS data. */
+ __le32 flash_data; /* Flash/NVRAM BIOS data. */
- uint32_t ctrl_status; /* Control/Status. */
+ __le32 ctrl_status; /* Control/Status. */
#define CSRX_FLASH_ACCESS_ERROR BIT_18 /* Flash/NVRAM Access Error. */
#define CSRX_DMA_ACTIVE BIT_17 /* DMA Active status. */
#define CSRX_DMA_SHUTDOWN BIT_16 /* DMA Shutdown control status. */
@@ -1166,35 +1166,35 @@ struct device_reg_24xx {
#define CSRX_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable. */
#define CSRX_ISP_SOFT_RESET BIT_0 /* ISP soft reset. */
- uint32_t ictrl; /* Interrupt control. */
+ __le32 ictrl; /* Interrupt control. */
#define ICRX_EN_RISC_INT BIT_3 /* Enable RISC interrupts on PCI. */
- uint32_t istatus; /* Interrupt status. */
+ __le32 istatus; /* Interrupt status. */
#define ISRX_RISC_INT BIT_3 /* RISC interrupt. */
- uint32_t unused_1[2]; /* Gap. */
+ __le32 unused_1[2]; /* Gap. */
/* Request Queue. */
- uint32_t req_q_in; /* In-Pointer. */
- uint32_t req_q_out; /* Out-Pointer. */
+ __le32 req_q_in; /* In-Pointer. */
+ __le32 req_q_out; /* Out-Pointer. */
/* Response Queue. */
- uint32_t rsp_q_in; /* In-Pointer. */
- uint32_t rsp_q_out; /* Out-Pointer. */
+ __le32 rsp_q_in; /* In-Pointer. */
+ __le32 rsp_q_out; /* Out-Pointer. */
/* Priority Request Queue. */
- uint32_t preq_q_in; /* In-Pointer. */
- uint32_t preq_q_out; /* Out-Pointer. */
+ __le32 preq_q_in; /* In-Pointer. */
+ __le32 preq_q_out; /* Out-Pointer. */
- uint32_t unused_2[2]; /* Gap. */
+ __le32 unused_2[2]; /* Gap. */
/* ATIO Queue. */
- uint32_t atio_q_in; /* In-Pointer. */
- uint32_t atio_q_out; /* Out-Pointer. */
+ __le32 atio_q_in; /* In-Pointer. */
+ __le32 atio_q_out; /* Out-Pointer. */
- uint32_t host_status;
+ __le32 host_status;
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t hccr; /* Host command & control register. */
+ __le32 hccr; /* Host command & control register. */
/* HCCR statuses. */
#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
@@ -1216,7 +1216,7 @@ struct device_reg_24xx {
/* Clear RISC to PCI interrupt. */
#define HCCRX_CLR_RISC_INT 0xA0000000
- uint32_t gpiod; /* GPIO Data register. */
+ __le32 gpiod; /* GPIO Data register. */
/* LED update mask. */
#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
@@ -1235,7 +1235,7 @@ struct device_reg_24xx {
/* Data in/out. */
#define GPDX_DATA_INOUT (BIT_1|BIT_0)
- uint32_t gpioe; /* GPIO Enable register. */
+ __le32 gpioe; /* GPIO Enable register. */
/* Enable update mask. */
#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
/* Enable update mask. */
@@ -1243,56 +1243,56 @@ struct device_reg_24xx {
/* Enable. */
#define GPEX_ENABLE (BIT_1|BIT_0)
- uint32_t iobase_addr; /* I/O Bus Base Address register. */
-
- uint32_t unused_3[10]; /* Gap. */
-
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
-
- uint32_t iobase_window;
- uint32_t iobase_c4;
- uint32_t iobase_c8;
- uint32_t unused_4_1[6]; /* Gap. */
- uint32_t iobase_q;
- uint32_t unused_5[2]; /* Gap. */
- uint32_t iobase_select;
- uint32_t unused_6[2]; /* Gap. */
- uint32_t iobase_sdata;
+ __le32 iobase_addr; /* I/O Bus Base Address register. */
+
+ __le32 unused_3[10]; /* Gap. */
+
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+
+ __le32 iobase_window;
+ __le32 iobase_c4;
+ __le32 iobase_c8;
+ __le32 unused_4_1[6]; /* Gap. */
+ __le32 iobase_q;
+ __le32 unused_5[2]; /* Gap. */
+ __le32 iobase_select;
+ __le32 unused_6[2]; /* Gap. */
+ __le32 iobase_sdata;
};
/* RISC-RISC semaphore register PCI offset */
#define RISC_REGISTER_BASE_OFFSET 0x7010
-#define RISC_REGISTER_WINDOW_OFFET 0x6
+#define RISC_REGISTER_WINDOW_OFFSET 0x6
/* RISC-RISC semaphore/flag register (risc address 0x7016) */
@@ -1354,8 +1354,8 @@ struct mid_conf_entry_24xx {
struct mid_init_cb_24xx {
struct init_cb_24xx init_cb;
- uint16_t count;
- uint16_t options;
+ __le16 count;
+ __le16 options;
struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
};
@@ -1389,27 +1389,27 @@ struct vp_ctrl_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t vp_idx_failed;
+ __le16 vp_idx_failed;
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquiring ID. */
#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
- uint16_t command;
+ __le16 command;
#define VCE_COMMAND_ENABLE_VPS 0x00 /* Enable VPs. */
#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */
- uint16_t vp_count;
+ __le16 vp_count;
uint8_t vp_idx_map[16];
- uint16_t flags;
- uint16_t id;
+ __le16 flags;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[24];
};
@@ -1425,12 +1425,12 @@ struct vp_config_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t flags;
+ __le16 flags;
#define CS_VF_BIND_VPORTS_TO_VF BIT_0
#define CS_VF_SET_QOS_OF_VPORTS BIT_1
#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
#define CS_VCT_CNT_ERROR 0x02 /* Invalid VP count. */
#define CS_VCT_ERROR 0x03 /* Unknown error. */
@@ -1457,9 +1457,9 @@ struct vp_config_entry_24xx {
uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
- uint16_t id;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[2];
};
@@ -1486,7 +1486,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t resv1;
+ __le32 resv1;
uint8_t vp_acquired;
uint8_t vp_setup;
uint8_t vp_idx; /* Format 0=reserved */
@@ -1550,15 +1550,15 @@ struct vf_evfp_entry_24xx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t timeout; /* timeout */
- uint16_t adim_tagging_mode;
+ __le16 comp_status; /* Completion status. */
+ __le16 timeout; /* timeout */
+ __le16 adim_tagging_mode;
- uint16_t vfport_id;
+ __le16 vfport_id;
uint32_t exch_addr;
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t control_flags;
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 control_flags;
uint32_t io_parameter_0;
uint32_t io_parameter_1;
__le64 tx_address __packed; /* Data segment 0 address. */
@@ -1573,13 +1573,13 @@ struct vf_evfp_entry_24xx {
struct qla_fdt_layout {
uint8_t sig[4];
- uint16_t version;
- uint16_t len;
- uint16_t checksum;
+ __le16 version;
+ __le16 len;
+ __le16 checksum;
uint8_t unused1[2];
uint8_t model[16];
- uint16_t man_id;
- uint16_t id;
+ __le16 man_id;
+ __le16 id;
uint8_t flags;
uint8_t erase_cmd;
uint8_t alt_erase_cmd;
@@ -1588,15 +1588,15 @@ struct qla_fdt_layout {
uint8_t wrt_sts_reg_cmd;
uint8_t unprotect_sec_cmd;
uint8_t read_man_id_cmd;
- uint32_t block_size;
- uint32_t alt_block_size;
- uint32_t flash_size;
- uint32_t wrt_enable_data;
+ __le32 block_size;
+ __le32 alt_block_size;
+ __le32 flash_size;
+ __le32 wrt_enable_data;
uint8_t read_id_addr_len;
uint8_t wrt_disable_bits;
uint8_t read_dev_id_len;
uint8_t chip_erase_cmd;
- uint16_t read_timeout;
+ __le16 read_timeout;
uint8_t protect_sec_cmd;
uint8_t unused2[65];
};
@@ -1605,11 +1605,11 @@ struct qla_fdt_layout {
struct qla_flt_location {
uint8_t sig[4];
- uint16_t start_lo;
- uint16_t start_hi;
+ __le16 start_lo;
+ __le16 start_hi;
uint8_t version;
uint8_t unused[5];
- uint16_t checksum;
+ __le16 checksum;
};
#define FLT_REG_FW 0x01
@@ -1664,19 +1664,19 @@ struct qla_flt_location {
#define FLT_REG_PEP_SEC_28XX 0xF1
struct qla_flt_region {
- uint16_t code;
+ __le16 code;
uint8_t attribute;
uint8_t reserved;
- uint32_t size;
- uint32_t start;
- uint32_t end;
+ __le32 size;
+ __le32 start;
+ __le32 end;
};
struct qla_flt_header {
- uint16_t version;
- uint16_t length;
- uint16_t checksum;
- uint16_t unused;
+ __le16 version;
+ __le16 length;
+ __le16 checksum;
+ __le16 unused;
struct qla_flt_region region[0];
};
@@ -1688,18 +1688,18 @@ struct qla_flt_header {
struct qla_npiv_header {
uint8_t sig[2];
- uint16_t version;
- uint16_t entries;
- uint16_t unused[4];
- uint16_t checksum;
+ __le16 version;
+ __le16 entries;
+ __le16 unused[4];
+ __le16 checksum;
};
struct qla_npiv_entry {
- uint16_t flags;
- uint16_t vf_id;
+ __le16 flags;
+ __le16 vf_id;
uint8_t q_qos;
uint8_t f_qos;
- uint16_t unused1;
+ __le16 unused1;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
};
@@ -1729,7 +1729,7 @@ struct verify_chip_entry_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define VCO_DONT_UPDATE_FW BIT_0
#define VCO_FORCE_UPDATE BIT_1
#define VCO_DONT_RESET_UPDATE BIT_2
@@ -1737,18 +1737,18 @@ struct verify_chip_entry_84xx {
#define VCO_END_OF_DATA BIT_14
#define VCO_ENABLE_DSD BIT_15
- uint16_t reserved_1;
+ __le16 reserved_1;
- uint16_t data_seg_cnt;
- uint16_t reserved_2[3];
+ __le16 data_seg_cnt;
+ __le16 reserved_2[3];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_3[3];
- uint32_t fw_size;
- uint32_t fw_seq_size;
- uint32_t relative_offset;
+ __le32 reserved_3[3];
+ __le32 fw_size;
+ __le32 fw_seq_size;
+ __le32 relative_offset;
struct dsd64 dsd;
};
@@ -1761,22 +1761,22 @@ struct verify_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
+ __le16 comp_status;
#define CS_VCS_CHIP_FAILURE 0x3
#define CS_VCS_BAD_EXCHANGE 0x8
#define CS_VCS_SEQ_COMPLETEi 0x40
- uint16_t failure_code;
+ __le16 failure_code;
#define VFC_CHECKSUM_ERROR 0x1
#define VFC_INVALID_LEN 0x2
#define VFC_ALREADY_IN_PROGRESS 0x8
- uint16_t reserved_1[4];
+ __le16 reserved_1[4];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_2[6];
+ __le32 reserved_2[6];
};
#define ACCESS_CHIP_IOCB_TYPE 0x2B
@@ -1788,24 +1788,24 @@ struct access_chip_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define ACO_DUMP_MEMORY 0x0
#define ACO_LOAD_MEMORY 0x1
#define ACO_CHANGE_CONFIG_PARAM 0x2
#define ACO_REQUEST_INFO 0x3
- uint16_t reserved1;
+ __le16 reserved1;
- uint16_t dseg_count;
- uint16_t reserved2[3];
+ __le16 dseg_count;
+ __le16 reserved2[3];
- uint32_t parameter1;
- uint32_t parameter2;
- uint32_t parameter3;
+ __le32 parameter1;
+ __le32 parameter2;
+ __le32 parameter3;
- uint32_t reserved3[3];
- uint32_t total_byte_cnt;
- uint32_t reserved4;
+ __le32 reserved3[3];
+ __le32 total_byte_cnt;
+ __le32 reserved4;
struct dsd64 dsd;
};
@@ -1818,11 +1818,11 @@ struct access_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
- uint16_t failure_code;
- uint32_t residual_count;
+ __le16 comp_status;
+ __le16 failure_code;
+ __le32 residual_count;
- uint32_t reserved[12];
+ __le32 reserved[12];
};
/* 81XX Support **************************************************************/
@@ -1877,52 +1877,52 @@ struct access_chip_rsp_84xx {
struct nvram_81xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
- uint16_t reserved_0;
+ __le16 nvram_version;
+ __le16 reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
- uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t reserved_2;
+ __le16 version;
+ __le16 reserved_1;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t reserved_3;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 reserved_3;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
- uint16_t reserved_4[4];
+ __le16 reserved_4[4];
/* Offset 64. */
uint8_t enode_mac[6];
- uint16_t reserved_5[5];
+ __le16 reserved_5[5];
/* Offset 80. */
- uint16_t reserved_6[24];
+ __le16 reserved_6[24];
/* Offset 128. */
- uint16_t ex_version;
+ __le16 ex_version;
uint8_t prio_fcf_matching_flags;
uint8_t reserved_6_1[3];
- uint16_t pri_fcf_vlan_id;
+ __le16 pri_fcf_vlan_id;
uint8_t pri_fcf_fabric_name[8];
- uint16_t reserved_6_2[7];
+ __le16 reserved_6_2[7];
uint8_t spma_mac_addr[6];
- uint16_t reserved_6_3[14];
+ __le16 reserved_6_3[14];
/* Offset 192. */
uint8_t min_supported_speed;
uint8_t reserved_7_0;
- uint16_t reserved_7[31];
+ __le16 reserved_7[31];
/*
* BIT 0 = Enable spinup delay
@@ -1955,26 +1955,26 @@ struct nvram_81xx {
* BIT 25 = Temp WWPN
* BIT 26-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
- uint16_t reserved_8;
+ __le16 boot_lun_number;
+ __le16 reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
- uint16_t reserved_9;
+ __le16 alt1_boot_lun_number;
+ __le16 reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
- uint16_t reserved_10;
+ __le16 alt2_boot_lun_number;
+ __le16 reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
- uint16_t reserved_11;
+ __le16 alt3_boot_lun_number;
+ __le16 reserved_11;
/*
* BIT 0 = Selective Login
@@ -1986,35 +1986,35 @@ struct nvram_81xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
- uint16_t reserved_13;
+ __le16 reserved_13;
- uint16_t boot_id_number;
- uint16_t reserved_14;
+ __le16 boot_id_number;
+ __le16 reserved_14;
- uint16_t max_luns_per_target;
- uint16_t reserved_15;
+ __le16 max_luns_per_target;
+ __le16 reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
- uint16_t reserved_16[3];
+ __le16 reserved_16[3];
/* Offset 352. */
uint8_t reserved_17[4];
- uint16_t reserved_18[5];
+ __le16 reserved_18[5];
uint8_t reserved_19[2];
- uint16_t reserved_20[8];
+ __le16 reserved_20[8];
/* Offset 384. */
uint8_t reserved_21[16];
- uint16_t reserved_22[3];
+ __le16 reserved_22[3];
/* Offset 406 (0x196) Enhanced Features
* BIT 0 = Extended BB credits for LR
@@ -2027,20 +2027,20 @@ struct nvram_81xx {
uint16_t reserved_24[4];
/* Offset 416. */
- uint16_t reserved_25[32];
+ __le16 reserved_25[32];
/* Offset 480. */
uint8_t model_name[16];
/* Offset 496. */
- uint16_t feature_mask_l;
- uint16_t feature_mask_h;
- uint16_t reserved_26[2];
+ __le16 feature_mask_l;
+ __le16 feature_mask_h;
+ __le16 reserved_26[2];
- uint16_t subsystem_vendor_id;
- uint16_t subsystem_device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
/*
@@ -2049,31 +2049,31 @@ struct nvram_81xx {
*/
#define ICB_VERSION 1
struct init_cb_81xx {
- uint16_t version;
- uint16_t reserved_1;
+ __le16 version;
+ __le16 reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t reserved_2;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t reserved_3;
+ __le16 reserved_3;
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
@@ -2081,12 +2081,12 @@ struct init_cb_81xx {
uint8_t reserved_4[8];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
__le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0-3 = Reserved
@@ -2099,7 +2099,7 @@ struct init_cb_81xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -2117,7 +2117,7 @@ struct init_cb_81xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0-3 = Reserved
@@ -2138,7 +2138,7 @@ struct init_cb_81xx {
* BIT 28 = SPMA selection bit 1
* BIT 30-31 = Reserved
*/
- uint32_t firmware_options_3;
+ __le32 firmware_options_3;
uint8_t reserved_5[8];
@@ -2216,9 +2216,9 @@ struct qla_fcp_prio_cfg {
#define FCP_PRIO_ATTR_ENABLE 0x1
#define FCP_PRIO_ATTR_PERSIST 0x2
uint8_t reserved; /* Reserved for future use */
-#define FCP_PRIO_CFG_HDR_SIZE 0x10
- struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
-#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+#define FCP_PRIO_CFG_HDR_SIZE offsetof(struct qla_fcp_prio_cfg, entry)
+ struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */
+ uint8_t reserved2[16];
};
#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/
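
The hunks above finish converting qla_fw.h's firmware-visible structures from raw uint16_t/uint32_t fields to __le16/__le32. Once the fields carry these annotations, sparse (make C=2) flags any load or store that bypasses a byte-order helper. A minimal sketch of the access pattern the annotations enforce (the struct and field names are illustrative, not taken from the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Firmware-visible layout: fields are little-endian on the wire. */
struct demo_iocb {
	__le16 nport_handle;
	__le16 control_flags;
	__le32 byte_count;
};

static void demo_fill(struct demo_iocb *pkt, u16 handle, u16 flags, u32 len)
{
	/* CPU-order values are converted before they reach the chip... */
	pkt->nport_handle = cpu_to_le16(handle);
	pkt->control_flags = cpu_to_le16(flags);
	pkt->byte_count = cpu_to_le32(len);
}

static u32 demo_read_count(const struct demo_iocb *pkt)
{
	/* ...and converted back before the CPU consumes them. */
	return le32_to_cpu(pkt->byte_count);
}
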
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1b93f5b4d77d..061f91b521b3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -173,6 +173,7 @@ extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
+extern int ql2xfulldump_on_mpifail;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -636,15 +637,17 @@ extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
-extern void qla2100_fw_dump(scsi_qla_host_t *, int);
-extern void qla2300_fw_dump(scsi_qla_host_t *, int);
-extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla8044_fw_dump(scsi_qla_host_t *, int);
-
-extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+void qla2xxx_dump_fw(scsi_qla_host_t *vha);
+void qla2100_fw_dump(scsi_qla_host_t *vha);
+void qla2300_fw_dump(scsi_qla_host_t *vha);
+void qla24xx_fw_dump(scsi_qla_host_t *vha);
+void qla25xx_fw_dump(scsi_qla_host_t *vha);
+void qla81xx_fw_dump(scsi_qla_host_t *vha);
+void qla82xx_fw_dump(scsi_qla_host_t *vha);
+void qla8044_fw_dump(scsi_qla_host_t *vha);
+
+void qla27xx_fwdump(scsi_qla_host_t *vha);
+extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int);
extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
extern int qla27xx_fwdt_template_valid(void *);
extern ulong qla27xx_fwdt_template_size(void *);
@@ -769,7 +772,7 @@ extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t);
-extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
uint32_t *, int);
extern uint32_t qlafx00_fw_state_show(struct device *,
@@ -871,7 +874,7 @@ extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
/* 83xx related functions */
-extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+void qla83xx_fw_dump(scsi_qla_host_t *vha);
/* Minidump related functions */
extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
@@ -933,5 +936,6 @@ extern void qla24xx_process_purex_list(struct purex_list *);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla27xx_reset_mpi(scsi_qla_host_t *vha);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
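
The qla_init.c hunks below swap the upper-case RD_REG_*/WRT_REG_* macros for lower-case inline accessors. Their definitions live outside this excerpt; a sketch consistent with how they are used here, as thin typed wrappers over the standard MMIO helpers (the _relaxed variants would presumably wrap readw_relaxed()/readl_relaxed() the same way):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Typed MMIO accessors (sketch): the __le16/__le32 pointer types let
 * sparse verify that a 16-bit register is only touched through the
 * 16-bit helper, and readw()/readl() already swap from little-endian
 * to CPU order.
 */
static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
{
	return readw(addr);
}

static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
{
	writew(data, addr);
}

static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
{
	return readl(addr);
}

static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
{
	writel(data, addr);
}
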
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index caa6b840e459..4576d3ae9937 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -120,7 +120,7 @@ static void qla24xx_abort_iocb_timeout(void *data)
if (sp->cmd_sp)
sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
- abt->u.abt.comp_status = CS_TIMEOUT;
+ abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
@@ -992,7 +992,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
ql_dbg(ql_dbg_disc, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
- __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+ __func__, &wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
(loop_id & 0x7fff));
}
@@ -1343,7 +1343,7 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
mb[9] = vha->vp_idx;
mb[10] = opt;
- mbx->u.mbx.in = (void *)pd;
+ mbx->u.mbx.in = pd;
mbx->u.mbx.in_dma = pd_dma;
sp->done = qla24xx_async_gpdb_sp_done;
@@ -1791,7 +1791,7 @@ qla2x00_tmf_iocb_timeout(void *data)
}
}
spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
- tmf->u.tmf.comp_status = CS_TIMEOUT;
+ tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
tmf->u.tmf.data = QLA_FUNCTION_FAILED;
complete(&tmf->u.tmf.comp);
}
@@ -2219,7 +2219,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Check for secure flash support */
if (IS_QLA28XX(ha)) {
- if (RD_REG_DWORD(&reg->mailbox12) & BIT_0)
+ if (rd_reg_word(&reg->mailbox12) & BIT_0)
ha->flags.secure_adapter = 1;
ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
(ha->flags.secure_adapter) ? "Yes" : "No");
@@ -2357,7 +2357,7 @@ qla2100_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2399,17 +2399,17 @@ qla2300_pci_config(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status);
/* Get the fb rev level */
ha->fb_rev = RD_FB_CMD_REG(ha, reg);
@@ -2418,13 +2418,13 @@ qla2300_pci_config(scsi_qla_host_t *vha)
pci_clear_mwi(ha->pdev);
/* Deselect FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x0);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x0);
+ rd_reg_word(&reg->ctrl_status);
/* Release RISC module. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
break;
udelay(10);
@@ -2439,7 +2439,7 @@ qla2300_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2483,7 +2483,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2587,36 +2587,36 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
if (!IS_QLA2100(ha)) {
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) &
+ if ((rd_reg_word(&reg->hccr) &
HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* FPM Soft Reset. */
- WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x100);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
/* Toggle Fpm Reset. */
if (!IS_QLA2200(ha)) {
- WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x0);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
}
/* Select frame buffer registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x10);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset frame buffer FIFOs. */
if (IS_QLA2200(ha)) {
@@ -2634,23 +2634,23 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
}
/* Select RISC module registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/* Wait for RISC to recover from reset. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -2661,7 +2661,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
*/
udelay(20);
for (cnt = 30000; cnt; cnt--) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
udelay(100);
@@ -2670,13 +2670,13 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
udelay(10);
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
@@ -2694,8 +2694,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
/* Disable RISC pause on FPM parity error. */
if (!IS_QLA2100(ha)) {
- WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2740,32 +2740,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset RISC. */
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
"HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status),
- (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status),
+ (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
/* Wait for firmware to complete NVRAM accesses. */
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rd_reg_word(&reg->mailbox0);
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
@@ -2779,26 +2779,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
"HCCR: 0x%x, MailBox0 Status 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
/* Wait for soft-reset to complete. */
- RD_REG_DWORD(&reg->ctrl_status);
+ rd_reg_dword(&reg->ctrl_status);
for (cnt = 0; cnt < 60; cnt++) {
barrier();
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(5);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
"HCCR: 0x%x, Soft Reset status: 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status));
/* If required, do an MPI FW reset now */
if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@@ -2817,17 +2817,17 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
}
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rd_reg_word(&reg->mailbox0);
+ for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
@@ -2840,8 +2840,8 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
"Host Risc 0x%x, mailbox0 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_WORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2860,9 +2860,8 @@ qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
-
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}
static void
@@ -2870,8 +2869,8 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
static void
@@ -2887,7 +2886,7 @@ qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
vha->hw->pdev->subsystem_device != 0x0240)
return;
- WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
udelay(100);
attempt:
@@ -2989,7 +2988,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/*
* We need to have a delay here since the card will not respond while
@@ -2999,7 +2998,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
data = qla2x00_debounce_register(&reg->ctrl_status);
for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
udelay(5);
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
barrier();
}
@@ -3010,8 +3009,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
"Reset register cleared by chip reset.\n");
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
/* Workaround for QLA2312 PCI parity error */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -3339,6 +3338,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
dump_size / 1024);
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->mpi_fw_dump = (char *)fw_dump +
+ ha->fwdt[1].dump_size;
mutex_unlock(&ha->optrom_mutex);
return;
}
@@ -3650,8 +3651,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -3758,11 +3759,11 @@ enable_82xx_npiv:
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2300(ha))
/* SRAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
else
/* SRAM, Instruction RAM and GP RAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -4006,11 +4007,11 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
- RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
+ rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
}
void
@@ -4072,15 +4073,15 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
}
icb->firmware_options_2 |= cpu_to_le32(BIT_23);
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
} else {
- WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp24.req_q_in, 0);
+ wrt_reg_dword(&reg->isp24.req_q_out, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
}
qlt_24xx_config_rings(vha);
@@ -4090,11 +4091,11 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
ql_dbg(ql_dbg_init, vha, 0x00fd,
"Speed set by user : %s Gbps \n",
qla2x00_get_link_speed_str(ha, ha->set_data_rate));
- icb->firmware_options_3 = (ha->set_data_rate << 13);
+ icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
}
/* PCI posting */
- RD_REG_DWORD(&ioreg->hccr);
+ rd_reg_word(&ioreg->hccr);
}
/**
@@ -4125,7 +4126,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req || !test_bit(que, ha->req_qid_map))
continue;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
*req->out_ptr = 0;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
@@ -4142,7 +4143,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
rsp = ha->rsp_q_map[que];
if (!rsp || !test_bit(que, ha->rsp_qid_map))
continue;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
*rsp->in_ptr = 0;
/* Initialize response queue entries */
if (IS_QLAFX00(ha))
@@ -4181,12 +4182,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->cur_fw_xcb_count);
ha->flags.dport_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_7) != 0;
ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
(ha->flags.dport_enabled) ? "enabled" : "disabled");
/* FA-WWPN Status */
ha->flags.fawwpn_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
@@ -4565,7 +4568,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
ha->nvram_size = sizeof(*nv);
ha->nvram_base = 0;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
- if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
+ if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
ha->nvram_base = 0x80;
/* Get NVRAM data and calculate checksum. */
@@ -5079,6 +5082,54 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
return (rval);
}
+static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ fc_port_t *fcport;
+ int rval;
+
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ /* borrowing */
+ u32 *bp, sz;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct els_plogi_payload),
+ ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ ha->init_cb, sz);
+ if (rval == QLA_SUCCESS) {
+ __be32 *q = &ha->plogi_els_payld.data[0];
+
+ bp = (uint32_t *)ha->init_cb;
+ cpu_to_be32_array(q, bp, sz / 4);
+ memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ goto skip_login;
+ }
+ }
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->n2n_flag) {
+ qla24xx_fcport_handle_login(vha, fcport);
+ return QLA_SUCCESS;
+ }
+ }
+
+skip_login:
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+ return QLA_FUNCTION_FAILED;
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5096,7 +5147,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
int found_devs;
int found;
fc_port_t *fcport, *new_fcport;
-
uint16_t index;
uint16_t entries;
struct gid_list_info *gid;
@@ -5106,47 +5156,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
unsigned long flags;
	/* Initiate N2N login. */
- if (N2N_TOPO(ha)) {
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- /* borrowing */
- u32 *bp, sz;
-
- memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct els_plogi_payload),
- ha->init_cb_size);
- rval = qla24xx_get_port_login_templ(vha,
- ha->init_cb_dma, (void *)ha->init_cb, sz);
- if (rval == QLA_SUCCESS) {
- __be32 *q = &ha->plogi_els_payld.data[0];
-
- bp = (uint32_t *)ha->init_cb;
- cpu_to_be32_array(q, bp, sz / 4);
-
- memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
- } else {
- ql_dbg(ql_dbg_init, vha, 0x00d1,
- "PLOGI ELS param read fail.\n");
- goto skip_login;
- }
- }
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->n2n_flag) {
- qla24xx_fcport_handle_login(vha, fcport);
- return QLA_SUCCESS;
- }
- }
-skip_login:
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_retry++;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- }
- return QLA_FUNCTION_FAILED;
- }
+ if (N2N_TOPO(ha))
+ return qla2x00_configure_n2n_loop(vha);
found_devs = 0;
new_fcport = NULL;
@@ -7078,10 +7089,10 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -7102,10 +7113,10 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (IS_NOPOLLING_TYPE(ha))
@@ -7143,7 +7154,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_24xx *icb;
struct nvram_24xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -7171,7 +7182,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
/* Get NVRAM data into cache and calculate checksum. */
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -7199,7 +7210,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->hard_address = cpu_to_le16(124);
@@ -7367,7 +7378,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount;
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Enable ZIO. */
if (!vha->flags.init_done) {
@@ -7435,7 +7446,7 @@ qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
- uint32_t *p = (void *)image_status;
+ __le32 *p = (__force __le32 *)image_status;
uint n = sizeof(*image_status) / sizeof(*p);
uint32_t sum = 0;
@@ -7498,7 +7509,7 @@ qla28xx_get_aux_images(
goto check_sec_image;
}
- qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
ha->flt_region_aux_img_status_pri,
sizeof(pri_aux_image_status) >> 2);
qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
@@ -7531,7 +7542,7 @@ check_sec_image:
goto check_valid_image;
}
- qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
ha->flt_region_aux_img_status_sec,
sizeof(sec_aux_image_status) >> 2);
qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
@@ -7596,7 +7607,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
goto check_sec_image;
}
- if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+ if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
QLA_SUCCESS) {
WARN_ON_ONCE(true);
@@ -7703,7 +7714,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_dbg(ql_dbg_init, vha, 0x008b,
"FW: Loading firmware from flash (%x).\n", faddr);
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x008c,
@@ -7716,18 +7727,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
ql_dbg(ql_dbg_init, vha, 0x008d,
"-> Loading segment %u...\n", j);
qla24xx_read_flash_data(vha, dcode, faddr, 10);
- risc_addr = be32_to_cpu(dcode[2]);
- risc_size = be32_to_cpu(dcode[3]);
+ risc_addr = be32_to_cpu((__force __be32)dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[3]);
if (!*srisc_addr) {
*srisc_addr = risc_addr;
- risc_attr = be32_to_cpu(dcode[9]);
+ risc_attr = be32_to_cpu((__force __be32)dcode[9]);
}
dlen = ha->fw_transfer_size >> 2;
@@ -7767,9 +7778,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
fwdt->template = NULL;
fwdt->length = 0;
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 7);
- risc_size = be32_to_cpu(dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[2]);
ql_dbg(ql_dbg_init, vha, 0x0161,
"-> fwdt%u template array at %#x (%#x dwords)\n",
j, faddr, risc_size);
@@ -7838,7 +7849,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
int i, fragment;
- uint16_t *wcode, *fwcode;
+ uint16_t *wcode;
+ __be16 *fwcode;
uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
struct fw_blob *blob;
struct qla_hw_data *ha = vha->hw;
@@ -7858,7 +7870,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
wcode = (uint16_t *)req->ring;
*srisc_addr = 0;
- fwcode = (uint16_t *)blob->fw->data;
+ fwcode = (__force __be16 *)blob->fw->data;
fwclen = 0;
/* Validate firmware image by checking version. */
@@ -7906,7 +7918,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
"words 0x%x.\n", risc_addr, wlen);
for (i = 0; i < wlen; i++)
- wcode[i] = swab16(fwcode[i]);
+ wcode[i] = swab16((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen);
@@ -7943,7 +7955,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ulong i;
uint j;
struct fw_blob *blob;
- uint32_t *fwcode;
+ __be32 *fwcode;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct fwdt *fwdt = ha->fwdt;
@@ -7959,8 +7971,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- fwcode = (void *)blob->fw->data;
- dcode = fwcode;
+ fwcode = (__force __be32 *)blob->fw->data;
+ dcode = (__force uint32_t *)fwcode;
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%zd).\n",
@@ -7971,7 +7983,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
@@ -7997,7 +8009,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dlen);
for (i = 0; i < dlen; i++)
- dcode[i] = swab32(fwcode[i]);
+ dcode[i] = swab32((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
if (rval) {
@@ -8051,7 +8063,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dcode = fwdt->template;
for (i = 0; i < risc_size; i++)
- dcode[i] = fwcode[i];
+ dcode[i] = (__force u32)fwcode[i];
if (!qla27xx_fwdt_template_valid(dcode)) {
ql_log(ql_log_warn, vha, 0x0175,
@@ -8322,7 +8334,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_81xx *icb;
struct nvram_81xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -8369,7 +8381,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
"primary" : "secondary");
ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -8396,7 +8408,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->port_name[0] = 0x21;
@@ -8440,7 +8452,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
if (IS_T10_PI_CAPABLE(ha))
- nv->frame_payload_size &= ~7;
+ nv->frame_payload_size &= cpu_to_le16(~7);
qlt_81xx_config_nvram_stage1(vha, nv);
@@ -8603,10 +8615,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
/* enable RIDA Format2 */
- icb->firmware_options_3 |= BIT_0;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_0);
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Determine NVMe/FCP priority for target ports */
ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
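
Both nvram_config routines above now walk the NVRAM image through a __le32 pointer (cast with __force, since the buffer is filled as raw bytes) and convert each dword while summing. The idiom, reduced to a self-contained sketch; treating a nonzero sum as an invalid image is an assumption based on the surrounding "Bad NVRAM" fallback paths:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Checksum idiom from qla24xx/qla81xx_nvram_config(): the image is a
 * buffer of little-endian dwords, each converted to CPU order before
 * summing.
 */
static u32 demo_nvram_checksum(const void *nvram, u32 size_bytes)
{
	const __le32 *dptr = nvram;
	u32 chksum = 0;
	u32 cnt;

	for (cnt = 0; cnt < size_bytes >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	return chksum;	/* nonzero: reject the image (assumption) */
}
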
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 364b3db8b2dc..1fb6ccac07cc 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -40,16 +40,16 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
* register value.
*/
static __inline__ uint16_t
-qla2x00_debounce_register(volatile uint16_t __iomem *addr)
+qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
volatile uint16_t first;
volatile uint16_t second;
do {
- first = RD_REG_WORD(addr);
+ first = rd_reg_word(addr);
barrier();
cpu_relax();
- second = RD_REG_WORD(addr);
+ second = rd_reg_word(addr);
} while (first != second);
return (first);
@@ -329,7 +329,7 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
} else
req->ring_ptr++;
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
}
static inline int
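
qla2x00_debounce_register() above re-reads a 16-bit register until two back-to-back reads agree, filtering transient values while the chip is settling. A hypothetical caller, modeled on the ctrl_status polling in qla2x00_chip_diag() (the register struct and CSR_ISP_SOFT_RESET come from the driver headers):

/* Sketch: act on the soft-reset bit only once the read is stable. */
static bool demo_soft_reset_done(struct device_reg_2xxx __iomem *reg)
{
	u16 cs = qla2x00_debounce_register(&reg->ctrl_status);

	return (cs & CSR_ISP_SOFT_RESET) == 0;
}
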
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 182bd68c79ac..8865c35d3421 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -376,7 +376,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
+ cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -428,8 +428,8 @@ qla2x00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -472,21 +472,21 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/* Set chip new ring index. */
if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
} else if (IS_QLA83XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
- WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+ wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->isp24.req_q_in);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
}
}
}
@@ -661,7 +661,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cur_dsd->address = 0;
cur_dsd->length = 0;
cur_dsd++;
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
return 0;
}
@@ -755,8 +755,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
}
struct fw_dif_context {
- uint32_t ref_tag;
- uint16_t app_tag;
+ __le32 ref_tag;
+ __le16 app_tag;
uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
};
@@ -1389,7 +1389,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
struct dsd64 *cur_dsd;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t total_bytes = 0;
@@ -1456,7 +1456,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
- cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
/* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) {
@@ -1545,7 +1545,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Fibre channel byte count */
cmd_pkt->byte_count = cpu_to_le32(total_bytes);
- fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+ fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes);
@@ -1637,7 +1637,7 @@ qla24xx_start_scsi(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1698,7 +1698,7 @@ qla24xx_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1822,7 +1822,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1881,7 +1881,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
req->ring_ptr++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1957,7 +1957,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2018,7 +2018,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2157,7 +2157,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2214,7 +2214,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
req->ring_ptr++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -2266,13 +2266,13 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
cnt = *req->out_ptr;
else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha))
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+ cnt = rd_reg_dword(reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ cnt = rd_reg_dword(&reg->isp24.req_q_out);
else if (IS_QLAFX00(ha))
- cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
+ cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -2305,8 +2305,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
if (IS_QLAFX00(ha)) {
- WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
- WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
+ wrt_reg_byte((void __iomem *)&pkt->entry_count, req_cnt);
+ wrt_reg_word((void __iomem *)&pkt->handle, handle);
} else {
pkt->entry_count = req_cnt;
pkt->handle = handle;
@@ -2344,9 +2344,10 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
- logio->control_flags |= LCF_NVME_PRLI;
+ logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
if (sp->vha->flags.nvme_first_burst)
- logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
+ logio->io_parameter[0] =
+ cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -2680,7 +2681,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- els_iocb->tx_dsd_count = 1;
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
els_iocb->vp_index = vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = 0;
@@ -2700,7 +2701,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
&els_iocb->tx_address);
- els_iocb->rx_dsd_count = 1;
+ els_iocb->rx_dsd_count = cpu_to_le16(1);
els_iocb->rx_byte_count = els_iocb->rx_len =
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
@@ -2712,7 +2713,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb,
sizeof(*els_iocb));
} else {
- els_iocb->control_flags = 1 << 13;
+ els_iocb->control_flags = cpu_to_le16(1 << 13);
els_iocb->tx_byte_count =
cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -2787,7 +2788,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct qla_work_evt *e;
struct fc_port *conflict_fcport;
port_id_t cid; /* conflict Nport id */
- u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
@@ -2800,7 +2801,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- switch (fw_status[0]) {
+ switch (le32_to_cpu(fw_status[0])) {
case CS_DATA_UNDERRUN:
case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
@@ -2810,9 +2811,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case CS_IOCB_ERROR:
- switch (fw_status[1]) {
+ switch (le32_to_cpu(fw_status[1])) {
case LSC_SCODE_PORTID_USED:
- lid = fw_status[2] & 0xffff;
+ lid = le32_to_cpu(fw_status[2]) & 0xffff;
qlt_find_sess_invalidate_other(vha,
wwn_to_u64(fcport->port_name),
fcport->d_id, lid, &conflict_fcport);
@@ -2846,9 +2847,11 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case LSC_SCODE_NPORT_USED:
- cid.b.domain = (fw_status[2] >> 16) & 0xff;
- cid.b.area = (fw_status[2] >> 8) & 0xff;
- cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
+ & 0xff;
+ cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
+ & 0xff;
+ cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec,
@@ -3022,7 +3025,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->sys_define = 0;
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
- els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
@@ -3216,7 +3219,7 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t tot_dsds;
struct device_reg_82xx __iomem *reg;
uint32_t dbval;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
uint8_t additional_cdb_len;
struct ct6_dsd *ctx;
struct scsi_qla_host *vha = sp->vha;
@@ -3310,7 +3313,7 @@ sufficient_dsds:
req_cnt = 1;
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3398,7 +3401,7 @@ sufficient_dsds:
memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
additional_cdb_len);
*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
@@ -3419,7 +3422,7 @@ sufficient_dsds:
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3495,10 +3498,10 @@ sufficient_dsds:
if (ql2xdbwr)
qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3536,7 +3539,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle));
+ abt_iocb->handle = make_handle(req->id, sp->handle);
if (sp->fcport) {
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3544,10 +3547,10 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
}
abt_iocb->handle_to_abort =
- cpu_to_le32(make_handle(aio->u.abt.req_que_no,
- aio->u.abt.cmd_hndl));
+ make_handle(le16_to_cpu(aio->u.abt.req_que_no),
+ aio->u.abt.cmd_hndl);
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
+ abt_iocb->req_que_no = aio->u.abt.req_que_no;
/* Send the command to the firmware */
wmb();
}
@@ -3562,7 +3565,7 @@ qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
for (i = 0; i < sz; i++)
- mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+ mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
}
static void
@@ -3586,7 +3589,7 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -3604,32 +3607,29 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
/*
* Build NVME LS request
*/
-static int
+static void
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
struct srb_iocb *nvme;
- int rval = QLA_SUCCESS;
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+ cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
- cmd_pkt->tx_dseg_count = 1;
- cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
- cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+ cmd_pkt->tx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
- cmd_pkt->rx_dseg_count = 1;
- cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
- cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
+ cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
-
- return rval;
}
static void
@@ -3894,7 +3894,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
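
Context for the make_handle() conversions above: the helper packs the request-queue id and the per-queue command index into one CPU-endian cookie, replacing the open-coded shift-and-or and the bogus le32_to_cpu() that used to wrap it. A minimal sketch, assuming the qla_def.h definition introduced by this series:

    /* sketch of the assumed helper: queue id in the top 16 bits,
     * per-queue handle in the bottom 16; the result is a CPU-endian
     * cookie that the firmware echoes back verbatim */
    static inline uint32_t make_handle(uint16_t x, uint16_t y)
    {
            return ((uint32_t)x << 16) | y;
    }
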
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8a78d395bbc8..cf0800546740 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -89,9 +89,9 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
/* terminate exchange */
rsp_els->entry_type = ELS_IOCB_TYPE;
rsp_els->entry_count = 1;
- rsp_els->nport_handle = ~0;
+ rsp_els->nport_handle = cpu_to_le16(~0);
rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
- rsp_els->control_flags = EPD_RX_XCHG;
+ rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
ql_dbg(ql_dbg_init, vha, 0x0283,
"Sending ELS Response to terminate exchange %#x...\n",
abts->rx_xch_addr_to_abort);
@@ -141,7 +141,7 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
abts_rsp->ox_id = abts->ox_id;
abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
- abts_rsp->payload.ba_acc.high_seq_cnt = ~0;
+ abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
ql_dbg(ql_dbg_init, vha, 0x028b,
"Sending BA ACC response to ABTS %#x...\n",
@@ -204,7 +204,7 @@ qla2100_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (qla2x00_check_reg16_for_disconnect(vha, hccr))
break;
if (hccr & HCCR_RISC_PAUSE) {
@@ -216,18 +216,18 @@ qla2100_intr_handler(int irq, void *dev_id)
* bit to be cleared. Schedule a big hammer to get
* out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
- } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
+ } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
break;
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
/* Get mailbox data. */
mb[0] = RD_MAILBOX_REG(ha, reg, 0);
@@ -246,13 +246,13 @@ qla2100_intr_handler(int irq, void *dev_id)
mb[0]);
}
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- RD_REG_WORD(&reg->semaphore);
+ wrt_reg_word(&reg->semaphore, 0);
+ rd_reg_word(&reg->semaphore);
} else {
qla2x00_process_response_queue(rsp);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
}
qla2x00_handle_mbx_completion(ha, status);
@@ -324,14 +324,14 @@ qla2300_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
ql_log(ql_log_warn, vha, 0x5026,
@@ -347,10 +347,10 @@ qla2300_intr_handler(int irq, void *dev_id)
* interrupt bit to be cleared. Schedule a big
* hammer to get out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSR_RISC_INT) == 0)
@@ -365,7 +365,7 @@ qla2300_intr_handler(int irq, void *dev_id)
status |= MBX_INTERRUPT;
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
break;
case 0x12:
mb[0] = MSW(stat);
@@ -393,8 +393,8 @@ qla2300_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD_RELAXED(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word_relaxed(&reg->hccr);
}
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -412,7 +412,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -428,15 +428,15 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
+ wptr = MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
+ wptr = MAILBOX_REG(ha, reg, 8);
if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
else if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
mboxes >>= 1;
@@ -451,19 +451,19 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
int rval;
struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
/* Seed data -- mailbox1 -> mailbox7. */
if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
- wptr = (uint16_t __iomem *)&reg24->mailbox1;
+ wptr = &reg24->mailbox1;
else if (IS_QLA8044(vha->hw))
- wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+ wptr = &reg82->mailbox_out[1];
else
return;
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
- mb[cnt] = RD_REG_WORD(wptr);
+ mb[cnt] = rd_reg_word(wptr);
ql_dbg(ql_dbg_async, vha, 0x5021,
"Inter-Driver Communication %s -- "
@@ -756,6 +756,39 @@ qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
return NULL;
}
+/* Shall be called only on supported adapters. */
+static void
+qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ bool reset_isp_needed = false;
+
+ ql_log(ql_log_warn, vha, 0x02f0,
+ "MPI Heartbeat stop. MPI reset is%s needed. "
+ "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[1] & BIT_8 ? "" : " not",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) == 0)
+ return;
+
+ ql_log(ql_log_warn, vha, 0x02f1,
+ "MPI Heartbeat stop. FW dump needed\n");
+
+ if (ql2xfulldump_on_mpifail) {
+ ha->isp_ops->fw_dump(vha);
+ reset_isp_needed = true;
+ }
+
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+
+ if (reset_isp_needed) {
+ vha->hw->flags.fw_init_done = 0;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+}
+
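
The new qla27xx_handle_8200_aen() consults ql2xfulldump_on_mpifail, a module parameter added in the qla_os.c part of this series (not shown here). A plausible sketch of the declaration; the exact description string is assumed:

    #include <linux/module.h>

    /* hypothetical rendering of the parameter added elsewhere in this series */
    int ql2xfulldump_on_mpifail;
    module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
                     "Set this to take full dump on MPI hang.");
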
/**
* qla2x00_async_event() - Process asynchronous events.
* @vha: SCSI driver HA context
@@ -785,7 +818,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+ handles[0] = make_handle(mb[2], mb[1]);
handle_cnt = 1;
break;
case MBA_CMPLT_1_16BIT:
@@ -824,10 +857,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
mb[0] = MBA_SCSI_COMPLETION;
break;
case MBA_CMPLT_2_32BIT:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
- handles[1] = le32_to_cpu(
- ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
- RD_MAILBOX_REG(ha, reg, 6));
+ handles[0] = make_handle(mb[2], mb[1]);
+ handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
+ RD_MAILBOX_REG(ha, reg, 6));
handle_cnt = 2;
mb[0] = MBA_SCSI_COMPLETION;
break;
@@ -858,10 +890,10 @@ skip_rio:
IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
u16 m[4];
- m[0] = RD_REG_WORD(&reg24->mailbox4);
- m[1] = RD_REG_WORD(&reg24->mailbox5);
- m[2] = RD_REG_WORD(&reg24->mailbox6);
- mbx = m[3] = RD_REG_WORD(&reg24->mailbox7);
+ m[0] = rd_reg_word(&reg24->mailbox4);
+ m[1] = rd_reg_word(&reg24->mailbox5);
+ m[2] = rd_reg_word(&reg24->mailbox6);
+ mbx = m[3] = rd_reg_word(&reg24->mailbox7);
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
@@ -871,10 +903,10 @@ skip_rio:
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
mb[1], mb[2], mb[3]);
- ha->fw_dump_mpi =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- RD_REG_WORD(&reg24->mailbox7) & BIT_8;
- ha->isp_ops->fw_dump(vha, 1);
+ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ rd_reg_word(&reg24->mailbox7) & BIT_8)
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
ha->flags.fw_init_done = 0;
QLA_FW_STOPPED(ha);
@@ -979,8 +1011,8 @@ skip_rio:
ha->current_topology = 0;
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
- ? RD_REG_WORD(&reg24->mailbox4) : 0;
- mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+ ? rd_reg_word(&reg24->mailbox4) : 0;
+ mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
: mbx;
ql_log(ql_log_info, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
@@ -1347,7 +1379,7 @@ global_port_update:
break;
case MBA_IDC_NOTIFY:
if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
(mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
(mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
@@ -1374,25 +1406,12 @@ global_port_update:
case MBA_IDC_AEN:
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- ha->flags.fw_init_done = 0;
- ql_log(ql_log_warn, vha, 0xffff,
- "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
- mb[0], mb[1], mb[2], mb[3]);
-
- if ((mb[1] & BIT_8) ||
- (mb[2] & BIT_8)) {
- ql_log(ql_log_warn, vha, 0xd013,
- "MPI Heartbeat stop. FW dump needed\n");
- ha->fw_dump_mpi = 1;
- ha->isp_ops->fw_dump(vha, 1);
- }
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ qla27xx_handle_8200_aen(vha, mb);
} else if (IS_QLA83XX(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
+ mb[5] = rd_reg_word(&reg24->mailbox5);
+ mb[6] = rd_reg_word(&reg24->mailbox6);
+ mb[7] = rd_reg_word(&reg24->mailbox7);
qla83xx_handle_8200_aen(vha, mb);
} else {
ql_dbg(ql_dbg_async, vha, 0x5052,
@@ -1646,7 +1665,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
for (i = 0; i < sz; i++)
- si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
+ si->u.mbx.in_mb[i] = pkt->mb[i];
res = (si->u.mbx.in_mb[0] & MBS_MASK);
@@ -1747,6 +1766,7 @@ static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type)
{
+ struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
@@ -1796,23 +1816,22 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
- fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
- fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+ fw_status[1] = le32_to_cpu(ese->error_subcode_1);
+ fw_status[2] = le32_to_cpu(ese->error_subcode_2);
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
- els->u.els_plogi.fw_status[0] = fw_status[0];
- els->u.els_plogi.fw_status[1] = fw_status[1];
- els->u.els_plogi.fw_status[2] = fw_status[2];
- els->u.els_plogi.comp_status = fw_status[0];
+ els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
+ els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
+ els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
+ els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
if (comp_status == CS_COMPLETE) {
res = DID_OK << 16;
} else {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- els->u.els_plogi.len =
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count);
+ els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
+ ese->total_byte_count));
} else {
els->u.els_plogi.len = 0;
res = DID_ERROR << 16;
@@ -1821,8 +1840,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
+ le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
}
@@ -1838,23 +1856,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
- le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+ le32_to_cpu(ese->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
+ le32_to_cpu(ese->total_byte_count));
} else {
ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
type, sp->handle, comp_status,
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_1),
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_2));
+ le32_to_cpu(ese->error_subcode_1),
+ le32_to_cpu(ese->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
}
@@ -2062,7 +2077,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
uint16_t ret = QLA_SUCCESS;
- uint16_t comp_status = le16_to_cpu(sts->comp_status);
+ __le16 comp_status = sts->comp_status;
int logit = 0;
iocb = &sp->u.iocb_cmd;
@@ -2093,7 +2108,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
(SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
/* Response already DMA'd to fd->rspaddr. */
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
} else if ((state_flags & SF_FCP_RSP_DMA)) {
/*
* Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
@@ -2110,8 +2125,8 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
inbuf = (uint32_t *)&sts->nvme_ersp_data;
outbuf = (uint32_t *)fd->rspaddr;
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
- if (unlikely(iocb->u.nvme.rsp_pyld_len >
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
+ if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
sizeof(struct nvme_fc_ersp_iu))) {
if (ql_mask_match(ql_dbg_io)) {
WARN_ONCE(1, "Unexpected response payload length %u.\n",
@@ -2121,9 +2136,9 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iocb->u.nvme.rsp_pyld_len);
}
iocb->u.nvme.rsp_pyld_len =
- sizeof(struct nvme_fc_ersp_iu);
+ cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
}
- iter = iocb->u.nvme.rsp_pyld_len >> 2;
+ iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
}
@@ -2138,7 +2153,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
"Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
tgt_xfer_len, fd->transferred_length);
logit = 1;
- } else if (comp_status == CS_DATA_UNDERRUN) {
+ } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
/*
* Do not log if this is just an underflow and there
* is no data loss.
@@ -2158,7 +2173,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
* If transport error then Failure (HBA rejects request)
* otherwise transport will handle.
*/
- switch (comp_status) {
+ switch (le16_to_cpu(comp_status)) {
case CS_COMPLETE:
break;
@@ -2300,7 +2315,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
/* Adjust ring index */
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
@@ -2391,9 +2406,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
* For type 3: ref & app tag is all 'f's
* For type 0,1,2: app tag is all 'f's
*/
- if ((a_app_tag == T10_PI_APP_ESCAPE) &&
- ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
- (a_ref_tag == T10_PI_REF_ESCAPE))) {
+ if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
+ (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
+ a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
uint32_t blocks_done, resid;
sector_t lba_s = scsi_get_lba(cmd);
@@ -2751,6 +2766,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sense_len = par_sense_len = rsp_info_len = resid_len =
fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
+ u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
+
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le32_to_cpu(sts24->sense_len);
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
@@ -2765,11 +2782,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
/* Valid values of the retry delay timer are 0x1-0xffef */
- if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
- retry_delay = sts24->retry_delay & 0x3fff;
+ if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
+ retry_delay = sts24_retry_delay & 0x3fff;
ql_dbg(ql_dbg_io, sp->vha, 0x3033,
"%s: scope=%#x retry_delay=%#x\n", __func__,
- sts24->retry_delay >> 14, retry_delay);
+ sts24_retry_delay >> 14, retry_delay);
}
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
@@ -3143,7 +3160,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -3159,11 +3176,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)&reg->mailbox1;
+ wptr = &reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
mboxes >>= 1;
wptr++;
@@ -3183,7 +3200,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = pkt->nport_handle;
sp->done(sp, 0);
}
@@ -3340,9 +3357,9 @@ process_err:
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
} else {
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
}
@@ -3359,13 +3376,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
return;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
- for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
+ for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3374,11 +3391,11 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto next_test;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
- for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
+ for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3387,13 +3404,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto done;
next_test:
- if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
+ if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
ql_log(ql_log_info, vha, 0x504c,
"Additional code -- 0x55AA.\n");
done:
- WRT_REG_DWORD(&reg->iobase_window, 0x0000);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x0000);
+ rd_reg_dword(&reg->iobase_window);
}
/**
@@ -3437,14 +3454,14 @@ qla24xx_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_warn, vha, 0x504b,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3452,7 +3469,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3469,9 +3486,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3491,8 +3508,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat * 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
ndelay(3500);
}
@@ -3531,8 +3548,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3566,14 +3583,14 @@ qla24xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_info, vha, 0x5050,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3581,7 +3598,7 @@ qla24xx_msix_default(int irq, void *dev_id)
qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3598,9 +3615,9 @@ qla24xx_msix_default(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3620,7 +3637,7 @@ qla24xx_msix_default(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3671,7 +3688,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
queue_work(ha->wq, &qpair->q_work);
@@ -3932,7 +3949,7 @@ clear_risc_ints:
goto fail;
spin_lock_irq(&ha->hardware_lock);
- WRT_REG_WORD(&reg->isp.semaphore, 0);
+ wrt_reg_word(&reg->isp.semaphore, 0);
spin_unlock_irq(&ha->hardware_lock);
fail:
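
The bulk of the qla_isr.c churn above is the RD_REG_*/WRT_REG_* macros giving way to typed inline accessors. Their value is in the parameter types: taking __le16 __iomem * (rather than uint16_t __iomem *) is what lets sparse flag the byte-order mistakes corrected throughout this patch. A minimal sketch of the assumed qla_def.h definitions:

    #include <linux/io.h>

    /* sketch: readw()/writew() already do the LE<->CPU conversion,
     * so callers work in CPU endianness on both sides */
    static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
    {
            return readw(addr);
    }

    static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
    {
            writew(data, addr);
    }
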
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d6c991bd1bde..df31ee0d59b2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -106,7 +106,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint8_t io_lock_on;
uint16_t command = 0;
uint16_t *iptr;
- uint16_t __iomem *optr;
+ __le16 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
@@ -208,11 +208,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
- optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ optr = &reg->isp82.mailbox_in[0];
else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
- optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
+ optr = &reg->isp24.mailbox0;
else
- optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
+ optr = MAILBOX_REG(ha, &reg->isp, 0);
iptr = mcp->mb;
command = mcp->mb[0];
@@ -222,12 +222,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- optr =
- (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
+ optr = MAILBOX_REG(ha, &reg->isp, 8);
if (mboxes & BIT_0) {
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
- WRT_REG_WORD(optr, *iptr);
+ wrt_reg_word(optr, *iptr);
}
mboxes >>= 1;
@@ -253,11 +252,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha))
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
@@ -300,7 +299,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Cmd=%x Polling Mode.\n", command);
if (IS_P3P_TYPE(ha)) {
- if (RD_REG_DWORD(&reg->isp82.hint) &
+ if (rd_reg_dword(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -311,11 +310,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
} else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
@@ -413,14 +412,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
- mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
- mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
- mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
- mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
- mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
- ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
- host_status = RD_REG_DWORD(&reg->isp24.host_status);
- hccr = RD_REG_DWORD(&reg->isp24.hccr);
+ mb[0] = rd_reg_word(&reg->isp24.mailbox0);
+ mb[1] = rd_reg_word(&reg->isp24.mailbox1);
+ mb[2] = rd_reg_word(&reg->isp24.mailbox2);
+ mb[3] = rd_reg_word(&reg->isp24.mailbox3);
+ mb[7] = rd_reg_word(&reg->isp24.mailbox7);
+ ictrl = rd_reg_dword(&reg->isp24.ictrl);
+ host_status = rd_reg_dword(&reg->isp24.host_status);
+ hccr = rd_reg_dword(&reg->isp24.hccr);
ql_log(ql_log_warn, vha, 0xd04c,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
@@ -430,7 +429,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else {
mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
- ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ ictrl = rd_reg_word(&reg->isp.ictrl);
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
@@ -462,7 +461,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* a dump
*/
if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
rval = QLA_FUNCTION_TIMEOUT;
}
}
@@ -573,15 +572,15 @@ mbx_done:
if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
ql_dbg(ql_dbg_mbx, vha, 0x1198,
"host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
- RD_REG_DWORD(&reg->isp24.host_status),
- RD_REG_DWORD(&reg->isp24.ictrl),
- RD_REG_DWORD(&reg->isp24.istatus));
+ rd_reg_dword(&reg->isp24.host_status),
+ rd_reg_dword(&reg->isp24.ictrl),
+ rd_reg_dword(&reg->isp24.istatus));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1206,
"ctrl_status=%#x ictrl=%#x istatus=%#x\n",
- RD_REG_WORD(&reg->isp.ctrl_status),
- RD_REG_WORD(&reg->isp.ictrl),
- RD_REG_WORD(&reg->isp.istatus));
+ rd_reg_word(&reg->isp.ctrl_status),
+ rd_reg_word(&reg->isp.ictrl),
+ rd_reg_word(&reg->isp.istatus));
}
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
@@ -3038,7 +3037,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter = (void *)stats;
+ uint32_t *iter = (uint32_t *)stats;
ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
struct qla_hw_data *ha = vha->hw;
@@ -3097,7 +3096,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter = (void *)stats;
+ uint32_t *iter = (uint32_t *)stats;
ushort dwords = sizeof(*stats)/sizeof(*iter);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
@@ -3110,8 +3109,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mc.mb[6] = MSW(MSD(stats_dma));
mc.mb[7] = LSW(MSD(stats_dma));
mc.mb[8] = dwords;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16(options);
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = options;
rval = qla24xx_send_mb_cmd(vha, &mc);
@@ -3204,7 +3203,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx, vha, 0x1090,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(abt->nport_handle));
- if (abt->nport_handle == CS_IOCB_ERROR)
+ if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
rval = QLA_FUNCTION_PARAMETER_ERROR;
else
rval = QLA_FUNCTION_FAILED;
@@ -4427,9 +4426,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
- WRT_REG_DWORD(req->req_q_in, 0);
+ wrt_reg_dword(req->req_q_in, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(req->req_q_out, 0);
+ wrt_reg_dword(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4498,9 +4497,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
- WRT_REG_DWORD(rsp->rsp_q_out, 0);
+ wrt_reg_dword(rsp->rsp_q_out, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(rsp->rsp_q_in, 0);
+ wrt_reg_dword(rsp->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4727,7 +4726,7 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
mbx_cmd_t *mcp = &mc;
int i;
int len;
- uint16_t *str;
+ __le16 *str;
struct qla_hw_data *ha = vha->hw;
if (!IS_P3P_TYPE(ha))
@@ -4736,14 +4735,14 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
"Entered %s.\n", __func__);
- str = (void *)version;
+ str = (__force __le16 *)version;
len = strlen(version);
mcp->mb[0] = MBC_SET_RNID_PARAMS;
mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
mcp->out_mb = MBX_1|MBX_0;
for (i = 4; i < 16 && len; i++, str++, len -= 2) {
- mcp->mb[i] = cpu_to_le16p(str);
+ mcp->mb[i] = le16_to_cpup(str);
mcp->out_mb |= 1<<i;
}
for (; i < 16; i++) {
@@ -4861,7 +4860,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__);
bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++)
- *bp = le32_to_cpu(*bp);
+ *bp = le32_to_cpu((__force __le32)*bp);
}
return rval;
@@ -5411,18 +5410,18 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Write the MBC data to the registers */
- WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
- WRT_REG_WORD(&reg->mailbox1, mb[0]);
- WRT_REG_WORD(&reg->mailbox2, mb[1]);
- WRT_REG_WORD(&reg->mailbox3, mb[2]);
- WRT_REG_WORD(&reg->mailbox4, mb[3]);
+ wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
+ wrt_reg_word(&reg->mailbox1, mb[0]);
+ wrt_reg_word(&reg->mailbox2, mb[1]);
+ wrt_reg_word(&reg->mailbox3, mb[2]);
+ wrt_reg_word(&reg->mailbox4, mb[3]);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
/* Poll for MBC interrupt */
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (stat & HSRX_RISC_INT) {
stat &= 0xff;
@@ -5430,10 +5429,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
- mb0 = RD_REG_WORD(&reg->mailbox0);
- WRT_REG_DWORD(&reg->hccr,
+ mb0 = rd_reg_word(&reg->mailbox0);
+ wrt_reg_dword(&reg->hccr,
HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rd_reg_dword(&reg->hccr);
break;
}
}
@@ -6211,7 +6210,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1144,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
}
@@ -6256,7 +6255,7 @@ qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
mcp->mb[4]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
if (subcode & BIT_5)
*sector_size = mcp->mb[1];
@@ -6470,13 +6469,13 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_PORT_DATABASE;
- mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[1] = fcport->loop_id;
mc.mb[2] = MSW(pd_dma);
mc.mb[3] = LSW(pd_dma);
mc.mb[6] = MSW(MSD(pd_dma));
mc.mb[7] = LSW(MSD(pd_dma));
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16((uint16_t)opt);
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = opt;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6587,7 +6586,7 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
mc.mb[6] = MSW(MSD(id_list_dma));
mc.mb[7] = LSW(MSD(id_list_dma));
mc.mb[8] = 0;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[9] = vha->vp_idx;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6613,8 +6612,8 @@ int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(1);
- mcp->mb[2] = cpu_to_le16(value);
+ mcp->mb[1] = 1;
+ mcp->mb[2] = value;
mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -6639,7 +6638,7 @@ int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
memset(mcp->mb, 0, sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(0);
+ mcp->mb[1] = 0;
mcp->out_mb = MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
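
The qla_mbx.c hunks above strip cpu_to_le16() from values staged in mcp->mb[] / mc.mb[]: the mb[] array is plain uint16_t consumed on the CPU side, and the single byte swap happens inside wrt_reg_word() when a value reaches the __le16 mailbox register, so swapping at assignment time double-converted on big-endian hosts. Sketched, with the accessor name assumed from above:

    /* sketch: mb[] stays CPU-endian in memory; writew() inside
     * wrt_reg_word() performs the one and only LE conversion */
    static void load_mailboxes(__le16 __iomem *optr, const uint16_t *mb, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    wrt_reg_word(optr + i, mb[i]);  /* not cpu_to_le16(mb[i]) */
    }
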
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index d82e92da529a..15efe2f04b86 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -770,7 +770,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
@@ -884,7 +884,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index df99911b8bb9..a8fe4f725fa0 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -46,7 +46,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
uint8_t io_lock_on;
uint16_t command = 0;
uint32_t *iptr;
- uint32_t __iomem *optr;
+ __le32 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
@@ -109,7 +109,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+ optr = &reg->ispfx00.mailbox0;
iptr = mcp->mb;
command = mcp->mb[0];
@@ -117,7 +117,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- WRT_REG_DWORD(optr, *iptr);
+ wrt_reg_dword(optr, *iptr);
mboxes >>= 1;
optr++;
@@ -676,14 +676,14 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
- WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ wrt_reg_dword(&reg->req_q_in, 0);
+ wrt_reg_dword(&reg->req_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ wrt_reg_dword(&reg->rsp_q_in, 0);
+ wrt_reg_dword(&reg->rsp_q_out, 0);
/* PCI posting */
- RD_REG_DWORD(&reg->rsp_q_out);
+ rd_reg_dword(&reg->rsp_q_out);
}
char *
@@ -912,9 +912,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* 30 seconds wait - Adjust if required */
wait_time = 30;
- pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ pseudo_aen = rd_reg_dword(&reg->pseudoaen);
if (pseudo_aen == 1) {
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
rval = qlafx00_driver_shutdown(vha, 10);
@@ -925,7 +925,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ);
do {
- aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx = rd_reg_dword(&reg->aenmailbox0);
barrier();
ql_dbg(ql_dbg_mbx, vha, 0x0133,
"aenmbx: 0x%x\n", aenmbx);
@@ -944,15 +944,15 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
case MBA_FW_RESTART_CMPLT:
/* Set the mbx and rqstq intr code */
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
- WRT_REG_DWORD(&reg->aenmailbox0, 0);
- RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
+ wrt_reg_dword(&reg->aenmailbox0, 0);
+ rd_reg_dword_relaxed(&reg->aenmailbox0);
ql_dbg(ql_dbg_init, vha, 0x0134,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -982,13 +982,13 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
* 3. issue Get FW State Mbox cmd to determine fw state
* Set the mbx and rqstq intr code from Shadow Regs
*/
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->initval1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
- ha->req_que_len = RD_REG_DWORD(&reg->initval5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+ ha->req_que_off = rd_reg_dword(&reg->initval1);
+ ha->rsp_que_off = rd_reg_dword(&reg->initval3);
+ ha->req_que_len = rd_reg_dword(&reg->initval5);
+ ha->rsp_que_len = rd_reg_dword(&reg->initval6);
ql_dbg(ql_dbg_init, vha, 0x0135,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -1034,7 +1034,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
if (time_after_eq(jiffies, wtime)) {
ql_dbg(ql_dbg_init, vha, 0x0137,
"Init f/w failed: aen[7]: 0x%x\n",
- RD_REG_DWORD(&reg->aenmailbox7));
+ rd_reg_dword(&reg->aenmailbox7));
rval = QLA_FUNCTION_FAILED;
done = true;
break;
@@ -1428,7 +1428,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
- WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
+ wrt_reg_dword((void __force __iomem *)&pkt->signature,
RESPONSE_PROCESSED);
pkt++;
}
@@ -1444,13 +1444,13 @@ qlafx00_rescan_isp(scsi_qla_host_t *vha)
qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
ql_dbg(ql_dbg_disc, vha, 0x2094,
"fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
@@ -1495,7 +1495,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
(!test_bit(UNLOADING, &vha->dpc_flags)) &&
(!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
(ha->mr.fw_hbt_en)) {
- fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+ fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
ha->mr.old_fw_hbt_cnt = fw_heart_beat;
ha->mr.fw_hbt_miss_cnt = 0;
@@ -1515,7 +1515,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
/* Reset recovery to be performed in timer routine */
- aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
if (ha->mr.fw_reset_timer_exp) {
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -1710,10 +1710,9 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
return;
}
-int
+void
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
- int rval = 0;
uint32_t aen_code, aen_data;
aen_code = FCH_EVT_VENDOR_UNIQUE;
@@ -1764,8 +1763,6 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
fc_host_post_event(vha->host, fc_get_event_number(),
aen_code, aen_data);
-
- return rval;
}
static void
@@ -2721,7 +2718,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
uint16_t lreq_q_in = 0;
uint16_t lreq_q_out = 0;
- lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
lreq_q_out = rsp->ring_index;
while (lreq_q_in != lreq_q_out) {
@@ -2783,7 +2780,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
/**
@@ -2814,9 +2811,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
- ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2846,13 +2843,13 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
default:
- ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
- ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
- ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
- ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
- ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
+ ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
+ ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
+ ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
+ ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
ql_dbg(ql_dbg_async, vha, 0x5078,
"AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
@@ -2872,7 +2869,7 @@ static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
uint16_t cnt;
- uint32_t __iomem *wptr;
+ __le32 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2882,10 +2879,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0;
- wptr = (uint32_t __iomem *)&reg->mailbox17;
+ wptr = &reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+ ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
wptr++;
}
}
@@ -2939,13 +2936,13 @@ qlafx00_intr_handler(int irq, void *dev_id)
break;
if (stat & QLAFX00_INTR_MB_CMPLT) {
- mb[0] = RD_REG_WORD(&reg->mailbox16);
+ mb[0] = rd_reg_dword(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT;
}
if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
- ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0);
qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
}
@@ -3113,7 +3110,7 @@ qlafx00_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3178,7 +3175,7 @@ qlafx00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3205,7 +3202,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
- tm_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
+ tm_iocb.handle = make_handle(req->id, sp->handle);
tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3215,7 +3212,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
sizeof(struct scsi_lun));
}
- memcpy((void *)ptm_iocb, &tm_iocb,
+ memcpy(ptm_iocb, &tm_iocb,
sizeof(struct tsk_mgmt_entry_fx00));
wmb();
}
@@ -3231,13 +3228,12 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1;
- abt_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
- abt_iocb.abort_handle =
- cpu_to_le32(make_handle(req->id, fxio->u.abt.cmd_hndl));
+ abt_iocb.handle = make_handle(req->id, sp->handle);
+ abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id);
- memcpy((void *)pabt_iocb, &abt_iocb,
+ memcpy(pabt_iocb, &abt_iocb,
sizeof(struct abort_iocb_entry_fx00));
wmb();
}
@@ -3254,7 +3250,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
fx_iocb.entry_type = FX00_IOCB_TYPE;
- fx_iocb.handle = cpu_to_le32(sp->handle);
+ fx_iocb.handle = sp->handle;
fx_iocb.entry_count = entry_cnt;
if (sp->type == SRB_FXIOCB_DCMD) {
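
qla_mr.c's polling loops move to rd_reg_dword_relaxed(), the variant without the read memory barrier, which is sufficient when a register is merely being spun on. A plausible sketch, assuming it mirrors the ordered accessor:

    /* sketch: readl_relaxed() skips the barrier; fine for pure polling */
    static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr)
    {
            return readl_relaxed(addr);
    }
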
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4567f0c42486..762250891a8f 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -96,7 +96,7 @@ struct tsk_mgmt_entry_fx00 {
uint8_t sys_define;
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
uint32_t reserved_0;
@@ -121,13 +121,13 @@ struct abort_iocb_entry_fx00 {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0;
__le16 tgt_id_sts; /* Completion status. */
__le16 options;
- __le32 abort_handle; /* System handle. */
+ uint32_t abort_handle; /* System handle. */
__le32 reserved_2;
__le16 req_que_no;
@@ -166,7 +166,7 @@ struct fxdisc_entry_fx00 {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0; /* System handle. */
__le16 func_num;
@@ -359,47 +359,47 @@ struct config_info_data {
#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
#define QLAFX00_SET_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
value)
#define QLAFX00_CLR_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_RD_INTR_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
#define QLAFX00_CLR_INTR_REG(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_HBA_RST_REG(ha, val)\
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val)
#define QLAFX00_RD_ICNTRL_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
QLAFX00_ICR_ENB_MASK))
#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
QLAFX00_ICR_DIS_MASK))
#define QLAFX00_RD_REG(ha, off) \
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_WR_REG(ha, off, val) \
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
struct qla_mt_iocb_rqst_fx00 {
__le32 reserved_0;
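
The handle members in the qlafx00 IOCB structures above revert from __le32 to uint32_t because a handle is an opaque cookie: the driver writes it, the firmware echoes it back untouched, and nothing interprets its bytes. Condensed from qlafx00_abort_iocb() earlier in this patch, as a sketch:

    /* sketch: opaque cookies round-trip verbatim, so the fields are
     * plain uint32_t and take no cpu_to_le32() */
    static void fill_abort(struct abort_iocb_entry_fx00 *abt,
                           struct req_que *req, srb_t *sp, uint32_t cmd_hndl)
    {
            abt->handle = make_handle(req->id, sp->handle);
            abt->abort_handle = make_handle(req->id, cmd_hndl);
    }
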
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 4886d247df6f..d66d47a0f958 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -138,7 +138,7 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
priv->sp = NULL;
sp->priv = NULL;
if (priv->comp_status == QLA_SUCCESS) {
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
} else {
fd->rcv_rsplen = 0;
fd->transferred_length = 0;
@@ -295,7 +295,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
sp->put_fn = qla_nvme_release_ls_cmd_kref;
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
@@ -384,7 +384,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -426,11 +426,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* No data transfer how do we check buffer len == 0?? */
if (fd->io_dir == NVMEFC_FCP_READ) {
- cmd_pkt->control_flags = CF_READ_DATA;
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
vha->qla_stats.input_bytes += fd->payload_length;
vha->qla_stats.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
- cmd_pkt->control_flags = CF_WRITE_DATA;
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
if ((vha->flags.nvme_first_burst) &&
(sp->fcport->nvme_prli_service_param &
NVME_PRLI_SP_FIRST_BURST)) {
@@ -438,7 +438,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
sp->fcport->nvme_first_burst_size) ||
(sp->fcport->nvme_first_burst_size == 0))
cmd_pkt->control_flags |=
- CF_NVME_FIRST_BURST_ENABLE;
+ cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
}
vha->qla_stats.output_bytes += fd->payload_length;
vha->qla_stats.output_requests++;
@@ -514,7 +514,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
@@ -560,7 +560,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
init_waitqueue_head(&sp->nvme_ls_waitq);
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
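
With control_flags and the byte counts becoming __le16/__le32 in qla_nvme.h below, every store must go through cpu_to_le16()/cpu_to_le32() — exactly the conversions the qla_nvme.c hunks above add — and sparse now rejects a bare assignment. A minimal illustration with a hypothetical struct:

    struct pkt_sketch {                     /* hypothetical, mirrors struct cmd_nvme */
            __le16 control_flags;
            __le32 byte_count;
    };

    static void fill(struct pkt_sketch *p, u32 len)
    {
            p->control_flags = cpu_to_le16(CF_READ_DATA);   /* clean */
            p->byte_count = cpu_to_le32(len);               /* clean */
            /* p->byte_count = len; would now draw a sparse warning */
    }
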
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index ef912902d4e5..fbb844226630 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -48,26 +48,26 @@ struct cmd_nvme {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
+ __le16 dseg_count; /* Data segment count. */
+ __le16 nvme_rsp_dsd_len; /* NVMe RSP DSD length */
uint64_t rsvd;
- uint16_t control_flags; /* Control Flags */
+ __le16 control_flags; /* Control Flags */
#define CF_NVME_FIRST_BURST_ENABLE BIT_11
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
+ __le16 nvme_cmnd_dseg_len; /* Data segment length. */
__le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */
__le64 nvme_rsp_dseg_address __packed; /* Data segment address. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -82,24 +82,24 @@ struct pt_ls4_request {
uint8_t sys_define;
uint8_t entry_status;
uint32_t handle;
- uint16_t status;
- uint16_t nport_handle;
- uint16_t tx_dseg_count;
+ __le16 status;
+ __le16 nport_handle;
+ __le16 tx_dseg_count;
uint8_t vp_index;
uint8_t rsvd;
- uint16_t timeout;
- uint16_t control_flags;
+ __le16 timeout;
+ __le16 control_flags;
#define CF_LS4_SHIFT 13
#define CF_LS4_ORIGINATOR 0
#define CF_LS4_RESPONDER 1
#define CF_LS4_RESPONDER_TERM 2
- uint16_t rx_dseg_count;
- uint16_t rsvd2;
- uint32_t exchange_address;
- uint32_t rsvd3;
- uint32_t rx_byte_count;
- uint32_t tx_byte_count;
+ __le16 rx_dseg_count;
+ __le16 rsvd2;
+ __le32 exchange_address;
+ __le32 rsvd3;
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
struct dsd64 dsd[2];
};
@@ -107,32 +107,32 @@ struct pt_ls4_request {
struct pt_ls4_rx_unsol {
uint8_t entry_type;
uint8_t entry_count;
- uint16_t rsvd0;
- uint16_t rsvd1;
+ __le16 rsvd0;
+ __le16 rsvd1;
uint8_t vp_index;
uint8_t rsvd2;
- uint16_t rsvd3;
- uint16_t nport_handle;
- uint16_t frame_size;
- uint16_t rsvd4;
- uint32_t exchange_address;
+ __le16 rsvd3;
+ __le16 nport_handle;
+ __le16 frame_size;
+ __le16 rsvd4;
+ __le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
be_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t param;
- uint32_t desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- uint32_t desc_len;
- uint32_t payload[3];
+ __le32 desc_len;
+ __le32 payload[3];
};
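These annotations only pay off when the driver is built with sparse endian checking (e.g. make C=2 on recent kernels). A small sketch of what the checker then flags; the warning text is approximate:

#include <linux/types.h>
#include <asm/byteorder.h>

static u16 demo_bad(__le16 v)
{
        return v + 1;   /* sparse: restricted __le16 degrades to integer */
}

static u16 demo_good(__le16 v)
{
        return le16_to_cpu(v) + 1;      /* clean */
}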
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 185c5f34d4c1..0baf55b7e88f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -370,7 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
@@ -380,47 +380,6 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
-static inline unsigned long
-qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
-{
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- /* See if we are currently pointing to the region we want to use next */
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
- /* No need to change window. PCIX and PCIEregs are in both
- * regs are in both windows.
- */
- return off;
- }
-
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
- /* We are in first CRB window */
- if (ha->curr_window != 0)
- WARN_ON(1);
- return off;
- }
-
- if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
- /* We are in second CRB window */
- off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
-
- if (ha->curr_window != 1)
- return off;
-
- /* We are in the QM or direct access
- * register region - do nothing
- */
- if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
- (off < QLA82XX_PCI_CAMQM_MAX))
- return off;
- }
- /* strange address given */
- ql_dbg(ql_dbg_p3p, vha, 0xb001,
- "%s: Warning: unm_nic_pci_set_crbwindow "
- "called with an unknown address(%llx).\n",
- QLA2XXX_DRIVER_NAME, off);
- return off;
-}
-
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
void __iomem **off_out)
@@ -520,7 +479,7 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
qla82xx_crb_win_lock(ha);
qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
}
- data = RD_REG_DWORD(off);
+ data = rd_reg_dword(off);
if (rv == 1) {
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
@@ -937,17 +896,17 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
+ wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
/* Read back value to make sure write has gone through */
- RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+ wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
data);
else
- rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+ rval = rd_reg_dword(off_value + CRB_INDIRECT_2M +
ha->nx_pcibase);
return rval;
@@ -1561,14 +1520,14 @@ qla82xx_get_table_desc(const u8 *unirom, int section)
uint32_t i;
struct qla82xx_uri_table_desc *directory =
(struct qla82xx_uri_table_desc *)&unirom[0];
- __le32 offset;
- __le32 tab_type;
- __le32 entries = cpu_to_le32(directory->num_entries);
+ uint32_t offset;
+ uint32_t tab_type;
+ uint32_t entries = le32_to_cpu(directory->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(directory->findex) +
- (i * cpu_to_le32(directory->entry_size));
- tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
+ offset = le32_to_cpu(directory->findex) +
+ (i * le32_to_cpu(directory->entry_size));
+ tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8);
if (tab_type == section)
return (struct qla82xx_uri_table_desc *)&unirom[offset];
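Replacing the pointer-cast dereferences with get_unaligned_le32() matters for two reasons: fields inside a firmware blob can sit at any byte offset, and the value must still be swapped on big-endian hosts. A minimal sketch (demo_* is illustrative):

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 demo_blob_read32(const u8 *blob, unsigned long off)
{
        /* Safe at any alignment, and byte-swapped where needed;
         * casting blob + off to (u32 *) and dereferencing is neither. */
        return get_unaligned_le32(blob + off);
}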
@@ -1582,16 +1541,17 @@ qla82xx_get_data_desc(struct qla_hw_data *ha,
u32 section, u32 idx_offset)
{
const u8 *unirom = ha->hablob->fw->data;
- int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
+ int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] +
+ idx_offset);
struct qla82xx_uri_table_desc *tab_desc = NULL;
- __le32 offset;
+ uint32_t offset;
tab_desc = qla82xx_get_table_desc(unirom, section);
if (!tab_desc)
return NULL;
- offset = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * idx);
+ offset = le32_to_cpu(tab_desc->findex) +
+ (le32_to_cpu(tab_desc->entry_size) * idx);
return (struct qla82xx_uri_data_desc *)&unirom[offset];
}
@@ -1606,7 +1566,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha,
QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1620,7 +1580,7 @@ static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- return cpu_to_le32(uri_desc->size);
+ return le32_to_cpu(uri_desc->size);
}
return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
@@ -1636,7 +1596,7 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1790,9 +1750,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &icb->request_q_address);
put_unaligned_le64(rsp->dma, &icb->response_q_address);
- WRT_REG_DWORD(&reg->req_q_out[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
+ wrt_reg_dword(&reg->req_q_out[0], 0);
+ wrt_reg_dword(&reg->rsp_q_in[0], 0);
+ wrt_reg_dword(&reg->rsp_q_out[0], 0);
}
static int
@@ -1847,8 +1807,8 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
struct qla82xx_uri_table_desc *ptab_desc = NULL;
const uint8_t *unirom = ha->hablob->fw->data;
uint32_t i;
- __le32 entries;
- __le32 flags, file_chiprev, offset;
+ uint32_t entries;
+ uint32_t flags, file_chiprev, offset;
uint8_t chiprev = ha->chip_revision;
/* Hardcoding mn_present flag for P3P */
int mn_present = 0;
@@ -1859,14 +1819,14 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
if (!ptab_desc)
return -1;
- entries = cpu_to_le32(ptab_desc->num_entries);
+ entries = le32_to_cpu(ptab_desc->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(ptab_desc->findex) +
- (i * cpu_to_le32(ptab_desc->entry_size));
- flags = cpu_to_le32(*((int *)&unirom[offset] +
+ offset = le32_to_cpu(ptab_desc->findex) +
+ (i * le32_to_cpu(ptab_desc->entry_size));
+ flags = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_FLAGS_OFF));
- file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
+ file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_CHIP_REV_OFF));
flagbit = mn_present ? 1 : 2;
@@ -1996,18 +1956,18 @@ void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+ wptr = &reg->mailbox_out[1];
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
}
@@ -2069,8 +2029,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2082,9 +2042,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2097,7 +2057,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -2135,11 +2095,11 @@ qla82xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
break;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2151,9 +2111,9 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2166,7 +2126,7 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
@@ -2196,11 +2156,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
reg = &ha->iobase->isp82;
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
qla24xx_process_response_queue(vha, rsp);
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
@@ -2231,11 +2191,11 @@ qla82xx_poll(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
case 0x2:
@@ -2246,9 +2206,9 @@ qla82xx_poll(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2260,7 +2220,7 @@ qla82xx_poll(int irq, void *dev_id)
stat * 0xff);
break;
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2549,8 +2509,8 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
return qla82xx_check_rcvpeg_state(ha);
}
-static uint32_t *
-qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+static __le32 *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t length)
{
uint32_t i;
@@ -2675,13 +2635,13 @@ qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
scsi_block_requests(vha->host);
- qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ qla82xx_read_flash_data(vha, buf, offset, length);
scsi_unblock_requests(vha->host);
return buf;
}
static int
-qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr,
uint32_t faddr, uint32_t dwords)
{
int ret;
@@ -2758,7 +2718,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
}
ret = qla82xx_write_flash_dword(ha, faddr,
- cpu_to_le32(*dwptr));
+ le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_p3p, vha, 0xb020,
"Unable to program flash address=%x data=%x.\n",
@@ -2818,10 +2778,10 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
if (ql2xdbwr)
qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3724,7 +3684,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
/* Minidump related functions */
static int
qla82xx_minidump_process_control(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
struct qla82xx_md_entry_crb *crb_entry;
@@ -3841,12 +3801,12 @@ qla82xx_minidump_process_control(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_rdocm *ocm_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
r_addr = ocm_hdr->read_addr;
@@ -3854,7 +3814,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
+ r_value = rd_reg_dword(r_addr + ha->nx_pcibase);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
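The d_ptr retyping to __le32 ** documents the minidump convention this hunk shows: register values are read in CPU order and stored into the dump buffer in little-endian order, so the dump format is host-independent. A sketch of the pattern with hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

static u32 demo_read_reg(u32 addr)
{
        return addr;                    /* stand-in for an MMIO read */
}

static void demo_dump_block(__le32 **d_ptr, u32 addr, u32 stride, u32 count)
{
        __le32 *out = *d_ptr;
        u32 i;

        for (i = 0; i < count; i++, addr += stride)
                *out++ = cpu_to_le32(demo_read_reg(addr));

        *d_ptr = out;                   /* advance the caller's cursor */
}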
@@ -3863,12 +3823,12 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
struct qla82xx_md_entry_mux *mux_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
r_addr = mux_hdr->read_addr;
@@ -3889,12 +3849,12 @@ qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_crb *crb_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
r_addr = crb_hdr->addr;
@@ -3912,7 +3872,7 @@ qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
@@ -3921,7 +3881,7 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
uint32_t c_value_w, c_value_r;
struct qla82xx_md_entry_cache *cache_hdr;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -3971,14 +3931,14 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
uint32_t c_value_w;
struct qla82xx_md_entry_cache *cache_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -4006,14 +3966,14 @@ qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t s_addr, r_addr;
uint32_t r_stride, r_value, r_cnt, qid = 0;
uint32_t i, k, loop_cnt;
struct qla82xx_md_entry_queue *q_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
s_addr = q_hdr->select_addr;
@@ -4036,13 +3996,13 @@ qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value;
uint32_t i, loop_cnt;
struct qla82xx_md_entry_rdrom *rom_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
r_addr = rom_hdr->read_addr;
@@ -4062,7 +4022,7 @@ qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value, r_data;
@@ -4070,7 +4030,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
struct qla82xx_md_entry_rdmem *m_hdr;
unsigned long flags;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
r_addr = m_hdr->read_addr;
@@ -4163,12 +4123,12 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
int no_entry_hdr = 0;
qla82xx_md_entry_hdr_t *entry_hdr;
struct qla82xx_md_template_hdr *tmplt_hdr;
- uint32_t *data_ptr;
+ __le32 *data_ptr;
uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
int i = 0, rval = QLA_FUNCTION_FAILED;
tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
- data_ptr = (uint32_t *)ha->md_dump;
+ data_ptr = ha->md_dump;
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xb037,
@@ -4177,7 +4137,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb038,
@@ -4357,7 +4317,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb044,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
md_failed:
@@ -4514,7 +4474,7 @@ exit:
}
void
-qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla82xx_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 230abee10598..93344a05910a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -800,16 +800,16 @@ struct qla82xx_legacy_intr_set {
#define QLA82XX_URI_FIRMWARE_IDX_OFF 29
struct qla82xx_uri_table_desc{
- uint32_t findex;
- uint32_t num_entries;
- uint32_t entry_size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 num_entries;
+ __le32 entry_size;
+ __le32 reserved[5];
};
struct qla82xx_uri_data_desc{
- uint32_t findex;
- uint32_t size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 size;
+ __le32 reserved[5];
};
/* UNIFIED ROMIMAGE END */
@@ -829,22 +829,22 @@ struct qla82xx_uri_data_desc{
* ISP 8021 I/O Register Set structure definitions.
*/
struct device_reg_82xx {
- uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
- uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
- uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+ __le32 req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
+ __le32 rsp_q_in[64]; /* Response Queue In-Pointer. */
+ __le32 rsp_q_out[64]; /* Response Queue Out-Pointer. */
- uint16_t mailbox_in[32]; /* Mail box In registers */
- uint16_t unused_1[32];
- uint32_t hint; /* Host interrupt register */
+ __le16 mailbox_in[32]; /* Mailbox In registers */
+ __le16 unused_1[32];
+ __le32 hint; /* Host interrupt register */
#define HINT_MBX_INT_PENDING BIT_0
- uint16_t unused_2[62];
- uint16_t mailbox_out[32]; /* Mail box Out registers */
- uint32_t unused_3[48];
+ __le16 unused_2[62];
+ __le16 mailbox_out[32]; /* Mailbox Out registers */
+ __le32 unused_3[48];
- uint32_t host_status; /* host status */
+ __le32 host_status; /* host status */
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t host_int; /* Interrupt status. */
+ __le32 host_int; /* Interrupt status. */
#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
};
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index c056f466f1f4..50e57603ce3d 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1441,7 +1441,7 @@ qla8044_device_bootstrap(struct scsi_qla_host *vha)
if (idc_ctrl & GRACEFUL_RESET_BIT1) {
qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
(idc_ctrl & ~GRACEFUL_RESET_BIT1));
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
}
dev_ready:
@@ -2965,7 +2965,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
/* Prepare: Write pex-dma descriptor to MS memory. */
rval = qla8044_ms_mem_write_128b(vha,
- m_hdr->desc_card_addr, (void *)&dma_desc,
+ m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
(sizeof(struct qla8044_pex_dma_descriptor)/16));
if (rval) {
ql_log(ql_log_warn, vha, 0xb14a,
@@ -2987,7 +2987,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
read_size += chunk_size;
}
- *d_ptr = (void *)data_ptr;
+ *d_ptr = (uint32_t *)data_ptr;
error_exit:
if (rdmem_buffer)
@@ -3249,7 +3249,7 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb10e,
@@ -3470,7 +3470,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb110,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
@@ -3487,7 +3487,7 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
if (!qla8044_collect_md_data(vha)) {
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
ha->prev_minidump_failed = 0;
} else {
ql_log(ql_log_fatal, vha, 0xb0db,
@@ -3946,8 +3946,8 @@ qla8044_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
if ((stat & HSRX_RISC_INT) == 0)
break;
@@ -3961,9 +3961,9 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -3976,7 +3976,7 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -4070,7 +4070,7 @@ exit_isp_reset:
}
void
-qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla8044_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1d9a4866f9a7..e92fad99338c 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,11 @@ static int apidev_major;
*/
struct kmem_cache *srb_cachep;
+int ql2xfulldump_on_mpifail;
+module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
+ "Set this to take full dump on MPI hang.");
+
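The new ql2xfulldump_on_mpifail knob follows the standard module-parameter pattern; S_IRUGO | S_IWUSR is octal 0644, i.e. world-readable and root-writable under /sys/module/qla2xxx/parameters/. A self-contained sketch with a hypothetical parameter:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_flag;
module_param(demo_flag, int, 0644);     /* same perms as S_IRUGO | S_IWUSR */
MODULE_PARM_DESC(demo_flag, "Set to 1 to enable the demo behaviour.");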
/*
* CT6 CTX allocation cache
*/
@@ -1216,9 +1221,9 @@ uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
if (IS_P3P_TYPE(ha))
- return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
else
- return ((RD_REG_DWORD(&reg->host_status)) ==
+ return ((rd_reg_dword(&reg->host_status)) ==
ISP_REG_DISCONNECT);
}
@@ -1902,8 +1907,8 @@ qla2x00_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
/* enable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1917,8 +1922,8 @@ qla2x00_disable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
/* disable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, 0);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, 0);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1930,8 +1935,8 @@ qla24xx_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
- WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1945,8 +1950,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
return;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -2518,6 +2523,7 @@ static struct isp_operations qla27xx_isp_ops = {
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla27xx_fwdump,
+ .mpi_fw_dump = qla27xx_mpi_fwdump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla83xx_beacon_blink,
@@ -4614,7 +4620,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->flags.fce_enabled = 0;
ha->eft = NULL;
ha->eft_dma = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
ha->fw_dump_cap_flags = 0;
ha->fw_dump_reading = 0;
ha->fw_dump = NULL;
@@ -5758,7 +5764,8 @@ qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
if (!pdb) {
ql_dbg(ql_dbg_init, vha, 0x0181,
"%s: Failed allocate pdb\n", __func__);
- } else if (qla24xx_get_port_database(vha, purex->nport_handle, pdb)) {
+ } else if (qla24xx_get_port_database(vha,
+ le16_to_cpu(purex->nport_handle), pdb)) {
ql_dbg(ql_dbg_init, vha, 0x0181,
"%s: Failed get pdb sid=%x\n", __func__, sid);
} else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
@@ -5910,7 +5917,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
"-------- ELS REQ -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
- (void *)purex, sizeof(*purex));
+ purex, sizeof(*purex));
if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
rsp_payload_length =
@@ -5952,7 +5959,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rsp_els->entry_status = 0;
rsp_els->handle = 0;
rsp_els->nport_handle = purex->nport_handle;
- rsp_els->tx_dsd_count = 1;
+ rsp_els->tx_dsd_count = cpu_to_le16(1);
rsp_els->vp_index = purex->vp_idx;
rsp_els->sof_type = EST_SOFI3;
rsp_els->rx_xchg_address = purex->rx_xchg_addr;
@@ -5963,7 +5970,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rsp_els->d_id[1] = purex->s_id[1];
rsp_els->d_id[2] = purex->s_id[2];
- rsp_els->control_flags = EPD_ELS_ACC;
+ rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
rsp_els->rx_byte_count = 0;
rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
@@ -5975,8 +5982,8 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
/* Prepare Response Payload */
rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
- rsp_payload->hdr.len = cpu_to_be32(
- rsp_els->tx_byte_count - sizeof(rsp_payload->hdr));
+ rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
+ sizeof(rsp_payload->hdr));
/* Link service Request Info Descriptor */
rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
@@ -6026,7 +6033,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
memset(sfp, 0, SFP_RTDI_LEN);
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
if (!rval) {
- uint16_t *trx = (void *)sfp; /* already be16 */
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
rsp_payload->sfp_diag_desc.temperature = trx[0];
rsp_payload->sfp_diag_desc.vcc = trx[1];
rsp_payload->sfp_diag_desc.tx_bias = trx[2];
@@ -6053,17 +6060,17 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
if (!rval) {
rsp_payload->ls_err_desc.link_fail_cnt =
- cpu_to_be32(stat->link_fail_cnt);
+ cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
rsp_payload->ls_err_desc.loss_sync_cnt =
- cpu_to_be32(stat->loss_sync_cnt);
+ cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
rsp_payload->ls_err_desc.loss_sig_cnt =
- cpu_to_be32(stat->loss_sig_cnt);
+ cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
rsp_payload->ls_err_desc.prim_seq_err_cnt =
- cpu_to_be32(stat->prim_seq_err_cnt);
+ cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
rsp_payload->ls_err_desc.inval_xmit_word_cnt =
- cpu_to_be32(stat->inval_xmit_word_cnt);
+ cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
rsp_payload->ls_err_desc.inval_crc_cnt =
- cpu_to_be32(stat->inval_crc_cnt);
+ cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
}
}
@@ -6135,7 +6142,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
memset(sfp, 0, SFP_RTDI_LEN);
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
if (!rval) {
- uint16_t *trx = (void *)sfp; /* already be16 */
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
/* Optical Element Descriptor, Temperature */
rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
@@ -6261,11 +6268,11 @@ send:
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
"-------- ELS RSP -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
- (void *)rsp_els, sizeof(*rsp_els));
+ rsp_els, sizeof(*rsp_els));
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
"-------- ELS RSP PAYLOAD -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
- (void *)rsp_payload, rsp_payload_length);
+ rsp_payload, rsp_payload_length);
rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
@@ -6871,6 +6878,7 @@ qla2x00_do_dpc(void *data)
if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
+ base_vha->flags.online = 1;
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
@@ -7550,15 +7558,15 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
- stat = RD_REG_DWORD(&reg->hccr);
+ stat = rd_reg_word(&reg->hccr);
if (stat & HCCR_RISC_PAUSE)
risc_paused = 1;
} else if (IS_QLA23XX(ha)) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED)
risc_paused = 1;
} else if (IS_FWI2_CAPABLE(ha)) {
- stat = RD_REG_DWORD(&reg24->host_status);
+ stat = rd_reg_dword(&reg24->host_status);
if (stat & HSRX_RISC_PAUSED)
risc_paused = 1;
}
@@ -7567,7 +7575,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
if (risc_paused) {
ql_log(ql_log_info, base_vha, 0x9003,
"RISC paused -- mmio_enabled, Dumping firmware.\n");
- ha->isp_ops->fw_dump(base_vha, 0);
+ qla2xxx_dump_fw(base_vha);
return PCI_ERS_RESULT_NEED_RESET;
} else
@@ -7814,13 +7822,19 @@ qla2x00_module_init(void)
{
int ret = 0;
+ BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
BUILD_BUG_ON(sizeof(init_cb_t) != 96);
+ BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
BUILD_BUG_ON(sizeof(request_t) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
@@ -7828,17 +7842,70 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
+ BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
+ BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
+ BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
+ BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
+ BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
+ BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
+ BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
+ BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
+ BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
+ BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
+ BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
+ BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
+ BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
+ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
+ BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
+ BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
- BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
- BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sw_info_t) != 32);
+ BUILD_BUG_ON(sizeof(target_id_t) != 2);
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
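The greatly expanded BUILD_BUG_ON() list pins the size of every wire-format structure at compile time, which is what makes the uintNN_t to __leNN retyping in this series safe to do mechanically. A minimal sketch with a hypothetical 8-byte entry:

#include <linux/build_bug.h>
#include <linux/init.h>
#include <linux/types.h>

struct demo_entry {
        __le32 handle;
        __le16 nport_handle;
        __le16 timeout;
};

static int __init demo_init(void)
{
        /* Fails the build, not the boot, if padding ever creeps in. */
        BUILD_BUG_ON(sizeof(struct demo_entry) != 8);
        return 0;
}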
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3da79ee1d88e..e161c05d7d82 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -26,24 +26,24 @@ qla2x00_lock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
while (data & NVR_BUSY) {
udelay(100);
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
}
/* Lock resource */
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
while ((data & BIT_0) == 0) {
/* Lock failed */
udelay(100);
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
}
@@ -58,8 +58,8 @@ qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
@@ -73,15 +73,15 @@ qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
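Each write in this bit-bang sequence is followed by a read of the same register; the "/* PCI Posting. */" comments mark the idiom of flushing a posted PCI write before a timing-sensitive delay. A sketch with illustrative names and delay:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

static void demo_nv_pulse(volatile __le16 __iomem *nvram, u16 bits)
{
        writew(bits, nvram);
        readw(nvram);           /* flush the posted write */
        udelay(2);              /* settle time, NVRAM_DELAY()-style */
}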
@@ -120,21 +120,21 @@ qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
/* Read data from NVRAM. */
for (cnt = 0; cnt < 16; cnt++) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
data <<= 1;
- reg_data = RD_REG_WORD(&reg->nvram);
+ reg_data = rd_reg_word(&reg->nvram);
if (reg_data & NVR_DATA_IN)
data |= BIT_0;
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
/* Deselect chip. */
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
return data;
@@ -171,8 +171,8 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
@@ -183,7 +183,7 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
* @data: word to program
*/
static void
-qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data)
{
int count;
uint16_t word;
@@ -202,7 +202,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -216,8 +216,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -226,7 +226,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
qla2x00_nv_deselect(ha);
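The (__force u16) cast above is the sparse escape hatch: the NVRAM word is kept in little-endian storage order and the bit-bang stream deliberately sends those raw bytes, so the endian annotation is intentionally stripped. A sketch with a hypothetical opcode:

#include <linux/types.h>

static u32 demo_build_cmd(u32 addr, __le16 data)
{
        u32 nv_cmd = (addr << 16) | 0x04000000; /* hypothetical write op */

        /* Without __force, sparse warns about mixing __le16 with u32;
         * the cast documents that the raw LE bit pattern is wanted. */
        nv_cmd |= (__force u16)data;
        return nv_cmd << 5;
}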
@@ -241,7 +241,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
static int
qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
- uint16_t data, uint32_t tmo)
+ __le16 data, uint32_t tmo)
{
int ret, count;
uint16_t word;
@@ -261,7 +261,7 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -275,11 +275,11 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
do {
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
if (!--tmo) {
ret = QLA_FUNCTION_FAILED;
break;
@@ -308,7 +308,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
int ret, stat;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
- uint16_t wprot, wprot_old;
+ __le16 wprot, wprot_old;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* Clear NVRAM write protection. */
@@ -318,7 +318,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
cpu_to_le16(0x1234), 100000);
wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
- if (stat != QLA_SUCCESS || wprot != 0x1234) {
+ if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) {
/* Write enable. */
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
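The comparison fix just above is easy to miss: once wprot is __le16, "wprot != 0x1234" mixes byte orders and only happens to work on little-endian hosts, so the constant side must be converted too. Minimal sketch:

#include <linux/types.h>
#include <asm/byteorder.h>

static bool demo_wprot_ok(__le16 wprot)
{
        /* Convert one side of the comparison; either cpu_to_le16()
         * on the constant or le16_to_cpu() on the variable works. */
        return wprot == cpu_to_le16(0x1234);
}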
@@ -347,8 +347,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -357,7 +357,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
if (wait_cnt)
@@ -407,8 +407,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -417,7 +417,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
}
@@ -456,11 +456,11 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 30000;
- WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
while (cnt--) {
- if (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) {
- *data = RD_REG_DWORD(&reg->flash_data);
+ if (rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG) {
+ *data = rd_reg_dword(&reg->flash_data);
return QLA_SUCCESS;
}
udelay(10);
@@ -499,11 +499,11 @@ qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 500000;
- WRT_REG_DWORD(&reg->flash_data, data);
- WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_data, data);
+ wrt_reg_dword(&reg->flash_addr, addr | FARX_DATA_FLAG);
while (cnt--) {
- if (!(RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG))
+ if (!(rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG))
return QLA_SUCCESS;
udelay(10);
cond_resched();
@@ -549,11 +549,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
{
const char *loc, *locations[] = { "DEF", "PCI" };
uint32_t pcihdr, pcids;
- uint16_t cnt, chksum, *wptr;
+ uint16_t cnt, chksum;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct qla_flt_location *fltl = (void *)req->ring;
- uint32_t *dcode = (void *)req->ring;
+ uint32_t *dcode = (uint32_t *)req->ring;
uint8_t *buf = (void *)req->ring, *bcode, last_image;
/*
@@ -610,7 +611,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
if (memcmp(fltl->sig, "QFLT", 4))
goto end;
- wptr = (void *)req->ring;
+ wptr = (__force __le16 *)req->ring;
cnt = sizeof(*fltl) / sizeof(*wptr);
for (chksum = 0; cnt--; wptr++)
chksum += le16_to_cpu(*wptr);
@@ -671,7 +672,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
struct qla_flt_header *flt = ha->flt;
struct qla_flt_region *region = &flt->region[0];
- uint16_t *wptr, cnt, chksum;
+ __le16 *wptr;
+ uint16_t cnt, chksum;
uint32_t start;
/* Assign FCP prio region since older adapters may not have FLT, or
@@ -681,8 +683,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
ha->flt_region_flt = flt_addr;
- wptr = (uint16_t *)ha->flt;
- ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2,
+ wptr = (__force __le16 *)ha->flt;
+ ha->isp_ops->read_optrom(vha, flt, flt_addr << 2,
(sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE));
if (le16_to_cpu(*wptr) == 0xffff)
@@ -949,7 +951,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
uint16_t cnt, chksum;
- uint16_t *wptr = (void *)req->ring;
+ __le16 *wptr = (__force __le16 *)req->ring;
struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
uint8_t man_id, flash_id;
uint16_t mid = 0, fid = 0;
@@ -1042,14 +1044,14 @@ static void
qla2xxx_get_idc_param(scsi_qla_host_t *vha)
{
#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
- uint32_t *wptr;
+ __le32 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
if (!(IS_P3P_TYPE(ha)))
return;
- wptr = (uint32_t *)req->ring;
+ wptr = (__force __le32 *)req->ring;
ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
if (*wptr == cpu_to_le32(0xffffffff)) {
@@ -1095,7 +1097,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
{
#define NPIV_CONFIG_SIZE (16*1024)
void *data;
- uint16_t *wptr;
+ __le16 *wptr;
uint16_t cnt, chksum;
int i;
struct qla_npiv_header hdr;
@@ -1197,9 +1199,9 @@ qla24xx_unprotect_flash(scsi_qla_host_t *vha)
return qla81xx_fac_do_write_enable(vha, 1);
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
if (!ha->fdt_wrt_disable)
goto done;
@@ -1240,8 +1242,8 @@ qla24xx_protect_flash(scsi_qla_host_t *vha)
skip_wrt_protect:
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
return QLA_SUCCESS;
}
@@ -1265,7 +1267,7 @@ qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata)
}
static int
-qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t dwords)
{
int ret;
@@ -1352,7 +1354,7 @@ next:
/* Slow write */
ret = qla24xx_write_flash_dword(ha,
- flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
+ flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
"Failed slopw write %x (%x)\n", faddr, *dwptr);
@@ -1379,11 +1381,11 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
- uint16_t *wptr;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
/* Word reads to NVRAM via registers. */
- wptr = (uint16_t *)buf;
+ wptr = buf;
qla2x00_lock_nvram_access(ha);
for (i = 0; i < bytes >> 1; i++, naddr++)
wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha,
@@ -1456,7 +1458,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
{
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t *dwptr = buf;
+ __le32 *dwptr = buf;
uint32_t i;
int ret;
@@ -1466,9 +1468,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
return ret;
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
/* Disable NVRAM write-protection. */
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
@@ -1478,7 +1480,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
naddr = nvram_data_addr(ha, naddr);
bytes >>= 2;
for (i = 0; i < bytes; i++, naddr++, dwptr++) {
- if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) {
+ if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) {
ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
naddr, *dwptr);
@@ -1490,9 +1492,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
return ret;
}
@@ -1588,8 +1590,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
/* Set the modified gpio_enable values */
@@ -1598,8 +1600,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
qla2x00_flip_colors(ha, &led_color);
@@ -1614,8 +1616,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1645,8 +1647,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
gpio_enable |= GPIO_LED_MASK;
@@ -1654,8 +1656,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
/* Clear out previously set LED colour. */
@@ -1663,8 +1665,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1731,13 +1733,13 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
/* Save the Original GPIOD. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Set the color bits. */
qla24xx_flip_colors(ha, &led_color);
@@ -1749,8 +1751,8 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
gpio_data |= led_color;
/* Set the modified gpio_data values. */
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1881,12 +1883,12 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1929,12 +1931,12 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
/* Give control back to firmware. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Disable the gpio_data reg for update. */
gpio_data &= ~GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_fw_options:
@@ -1970,10 +1972,10 @@ qla2x00_flash_enable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data |= CSR_FLASH_ENABLE;
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/**
@@ -1986,10 +1988,10 @@ qla2x00_flash_disable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data &= ~(CSR_FLASH_ENABLE);
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/**
@@ -2008,7 +2010,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
@@ -2016,11 +2018,11 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- data = RD_REG_WORD(&reg->flash_data);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ data = rd_reg_word(&reg->flash_data);
return (uint8_t)data;
}
@@ -2028,13 +2030,13 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2049,7 +2051,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
} while (data != data2);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
data = qla2x00_debounce_register(&reg->flash_data);
}
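
qla2x00_debounce_register(), used just above on the flash data register, re-reads until two consecutive samples agree. A standalone sketch of that idiom:

/* Sketch: debounce a register that can be sampled mid-update by
 * re-reading until two back-to-back values match. */
static u16 debounce_word(void __iomem *reg)
{
        u16 first, second;

        do {
                first = readw(reg);
                second = readw(reg);
        } while (first != second);

        return first;
}
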
@@ -2068,20 +2070,20 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
/* clear out Module Select and Flash Address bits [19:16]. */
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
return;
}
@@ -2089,13 +2091,13 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2103,10 +2105,10 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
}
@@ -2289,12 +2291,12 @@ qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
midpoint = length / 2;
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram);
for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
if (ilength == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
data = qla2x00_read_flash_byte(ha, saddr);
if (saddr % 100)
@@ -2319,11 +2321,11 @@ qla2x00_suspend_hba(struct scsi_qla_host *vha)
/* Pause RISC. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ rd_reg_word(&reg->hccr);
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
@@ -2362,12 +2364,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
midpoint = ha->optrom_size / 2;
qla2x00_flash_enable(ha);
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
for (addr = offset, data = buf; addr < length; addr++, data++) {
if (addr == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
}
*data = qla2x00_read_flash_byte(ha, addr);
@@ -2399,7 +2401,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_number = 0;
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
/* Go with write. */
@@ -2548,8 +2550,8 @@ update_flash:
}
}
} else if (addr == ha->optrom_size / 2) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
if (flash_id == 0xda && man_id == 0xc1) {
@@ -2610,7 +2612,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
- qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2);
+ qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -2662,7 +2664,7 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
for (; cnt; cnt--, flt_reg++) {
- if (flt_reg->start == start) {
+ if (le32_to_cpu(flt_reg->start) == start) {
memcpy((uint8_t *)region, flt_reg,
sizeof(struct qla_flt_region));
rval = QLA_SUCCESS;
@@ -2691,7 +2693,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_flt_region region;
bool reset_to_rom = false;
uint32_t risc_size, risc_attr = 0;
- uint32_t *fw_array = NULL;
+ __be32 *fw_array = NULL;
/* Retrieve region info - must be a start address passed in */
rval = qla28xx_get_flash_region(vha, offset, &region);
@@ -2722,12 +2724,12 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Region %x is secure\n", region.code);
- switch (region.code) {
+ switch (le16_to_cpu(region.code)) {
case FLT_REG_FW:
case FLT_REG_FW_SEC_27XX:
case FLT_REG_MPI_PRI_28XX:
case FLT_REG_MPI_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
@@ -2761,7 +2763,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
case FLT_REG_PEP_PRI_28XX:
case FLT_REG_PEP_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
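
Firmware regions are stored big-endian, so the same flat buffer is re-viewed as __be32 here; the __force cast tells sparse the reinterpretation is deliberate. The pattern in isolation (fw_image_dwords is an illustrative name):

/* Sketch: reinterpret a CPU-order-typed buffer as big-endian words;
 * index 3 holding the image length mirrors the layout used above. */
static u32 fw_image_dwords(const u32 *buf)
{
        const __be32 *fw = (__force const __be32 *)buf;

        return be32_to_cpu(fw[3]); /* length word, wire order */
}
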
@@ -2892,7 +2894,8 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
if (region.attribute && buf_size_without_sfub) {
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Sending Secure Flash MB Cmd\n");
- rval = qla28xx_secure_flash_update(vha, 0, region.code,
+ rval = qla28xx_secure_flash_update(vha, 0,
+ le16_to_cpu(region.code),
buf_size_without_sfub, sfub_dma,
sizeof(struct secure_flash_update_block) >> 2);
if (rval != QLA_SUCCESS) {
@@ -2981,11 +2984,11 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
/* Go with write. */
if (IS_QLA28XX(ha))
- rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla28xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
else
- rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla24xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
scsi_unblock_requests(vha->host);
@@ -3513,7 +3516,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
} else {
for (i = 0; i < 4; i++)
- ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
+ ha->fw_revision[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
ql_dbg(ql_dbg_init, vha, 0x0060,
"Firmware revision (flash) %u.%u.%u (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
@@ -3528,7 +3532,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
faddr = ha->flt_region_gold_fw;
- qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
+ qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x0056,
"Unrecognized golden fw at %#x.\n", faddr);
@@ -3537,7 +3541,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
}
for (i = 0; i < 4; i++)
- ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
+ ha->gold_fw_version[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
return ret;
}
@@ -3617,7 +3622,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
/* read remaining FCP CMD config data from flash */
fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
- len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry);
max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 622e7337affc..fbb80a043b4f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -378,7 +378,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
qlt_issue_marker(vha, ha_locked);
if ((entry->u.isp24.vp_index != 0xFF) &&
- (entry->u.isp24.nport_handle != 0xFFFF)) {
+ (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
host = qlt_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
@@ -1697,7 +1697,7 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1725,7 +1725,8 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
struct scsi_qla_host *vha = mcmd->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl, h;
+ __le32 f_ctl;
+ uint32_t h;
uint8_t *p;
int rc;
struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
@@ -1782,7 +1783,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -1814,7 +1815,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl;
+ __le32 f_ctl;
uint8_t *p;
ql_dbg(ql_dbg_tgt, vha, 0xe006,
@@ -1857,7 +1858,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -2030,7 +2031,7 @@ static void qlt_do_tmr_work(struct work_struct *work)
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
- tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
+ tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
break;
default:
tag = 0;
@@ -2110,7 +2111,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_tgt_cmd *abort_cmd;
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
- abts->exchange_addr_to_abort);
+ le32_to_cpu(abts->exchange_addr_to_abort));
if (abort_cmd && abort_cmd->qpair) {
mcmd->qpair = abort_cmd->qpair;
mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
@@ -2133,7 +2134,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
- uint32_t tag = abts->exchange_addr_to_abort;
+ uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
be_id_t s_id;
int rc;
unsigned long flags;
@@ -2223,7 +2224,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = ha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2280,7 +2281,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE;
- ctio->nport_handle = cmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2484,7 +2485,7 @@ static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out));
+ rd_reg_dword_relaxed(req->req_q_out));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -2840,10 +2841,14 @@ skip_explict_conf:
cpu_to_le16(SS_SENSE_LEN_VALID);
ctio->u.status1.sense_length =
cpu_to_le16(prm->sense_buffer_len);
- for (i = 0; i < prm->sense_buffer_len/4; i++)
- ((uint32_t *)ctio->u.status1.sense_data)[i] =
- cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+ for (i = 0; i < prm->sense_buffer_len/4; i++) {
+ uint32_t v;
+ v = get_unaligned_be32(
+ &((uint32_t *)prm->sense_buffer)[i]);
+ put_unaligned_le32(v,
+ &((uint32_t *)ctio->u.status1.sense_data)[i]);
+ }
qlt_print_dif_err(prm);
} else {
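
The old loop assigned cpu_to_be32() results into a little-endian destination; the replacement goes through a CPU-order temporary using the unaligned helpers. A standalone sketch (buffer names illustrative):

#include <asm/unaligned.h>

/* Sketch: re-encode a big-endian byte stream as little-endian words
 * without assuming either buffer is 4-byte aligned. */
static void be_stream_to_le(void *dst, const void *src, unsigned int words)
{
        unsigned int i;

        for (i = 0; i < words; i++) {
                u32 v = get_unaligned_be32(src + 4 * i);

                put_unaligned_le32(v, dst + 4 * i);
        }
}
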
@@ -3114,7 +3119,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
- pkt->dseg_count = prm->tot_dsds;
+ pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
@@ -3136,7 +3141,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
- pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
if (!bundling) {
cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
@@ -3573,7 +3578,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
/* terminate */
@@ -3647,7 +3652,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -3885,7 +3890,7 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
- cmd = (void *) req->outstanding_cmds[h];
+ cmd = req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
ql_dbg(ql_dbg_async, vha, 0xe053,
"qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -4110,7 +4115,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
- cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
+ cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
if (atio->u.isp24.fcp_cmnd.rddata &&
atio->u.isp24.fcp_cmnd.wrdata) {
@@ -5302,7 +5307,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = sess->loop_id;
+ ctio24->nport_handle = cpu_to_le16(sess->loop_id);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -5315,13 +5320,14 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
* CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
* if the explicit confirmation is used.
*/
- ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.ox_id =
+ cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
ctio24->u.status1.scsi_status = cpu_to_le16(status);
- ctio24->u.status1.residual = get_datalen_for_atio(atio);
+ ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
if (ctio24->u.status1.residual != 0)
- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+ ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
/* Memory Barrier */
wmb();
@@ -5550,7 +5556,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
if (unlikely(atio->u.isp24.exchange_addr ==
- ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
ql_dbg(ql_dbg_io, vha, 0x3065,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
@@ -5670,9 +5676,9 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
if (qpair == ha->base_qpair)
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
else
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -5713,8 +5719,8 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
entry->compl_status);
if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
- if ((entry->error_subcode1 == 0x1E) &&
- (entry->error_subcode2 == 0)) {
+ if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
+ le32_to_cpu(entry->error_subcode2) == 0) {
if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
ha->tgt.tgt_ops->free_mcmd(mcmd);
return;
@@ -5928,11 +5934,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
"qla_target(%d): Async LOOP_UP occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(ha->base_qpair,
- (void *)&tgt->link_reinit_iocb,
+ &tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
@@ -5946,18 +5951,16 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
"qla_target(%d): Async event %#x occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
break;
case MBA_REJECTED_FCP_CMD:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
"qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- if (le16_to_cpu(mailbox[3]) == 1) {
+ if (mailbox[3] == 1) {
/* exchange starvation. */
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -5981,10 +5984,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
"qla_target(%d): Port update async event %#x "
"occurred: updating the ports database (m[0]=%x, m[1]=%x, "
"m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- login_code = le16_to_cpu(mailbox[2]);
+ login_code = mailbox[2];
if (login_code == 0x4) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
"Async MB 2: Got PLOGI Complete\n");
@@ -6661,9 +6663,14 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+
+ /*
+ * We are expecting the offline state.
+ * QLA_FUNCTION_FAILED means that the adapter is offline.
+ */
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
ql_dbg(ql_dbg_tgt, vha, 0xe081,
- "qla2x00_wait_for_hba_online() failed\n");
+ "adapter is offline\n");
}
/*
@@ -6729,7 +6736,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
return;
for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt++;
}
@@ -6764,7 +6771,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
&pkt->u.isp24.fcp_hdr.s_id,
be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
- le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+ pkt->u.isp24.exchange_addr, pkt);
adjust_corrupted_atio(pkt);
qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
@@ -6782,14 +6789,14 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
} else
ha->tgt.atio_ring_ptr++;
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
}
wmb();
}
/* Adjust ring index */
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
@@ -6802,19 +6809,19 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (!QLA_TGT_MODE_ENABLED())
return;
- WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
- RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+ wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
+ rd_reg_dword(ISP_ATIO_Q_OUT(vha));
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (IS_QLA2071(ha)) {
/* 4 ports Baker: Enable Interrupt Handshake */
icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
} else {
icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= ~BIT_26;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
}
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
@@ -6824,7 +6831,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
/* INTx|MSI */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"%s: Use INTx for ATIOQ.\n", __func__);
}
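
Read-modify-write of little-endian fields, as in the firmware_options_2 lines above, likewise keeps the mask on the constant side. A small helper sketch (le32_assign_bit is a hypothetical name):

/* Sketch: set or clear a flag inside a __le32 register image without
 * converting the whole field to CPU order and back. */
static inline void le32_assign_bit(__le32 *field, u32 mask, bool set)
{
        if (set)
                *field |= cpu_to_le32(mask);
        else
                *field &= ~cpu_to_le32(mask); /* ~ commutes with byte swap */
}
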
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 3cf8590feeac..010f12523b2a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -135,37 +135,37 @@ struct nack_to_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t flags;
- uint16_t resp_code;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
- uint16_t srr_reject_code;
+ __le16 flags;
+ __le16 resp_code;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
+ __le16 srr_reject_code;
uint8_t srr_reject_vendor_uniq;
uint8_t srr_reject_code_expl;
uint8_t reserved_2[24];
} isp2x;
struct {
uint32_t handle;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_1;
- uint16_t flags;
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 flags;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
uint8_t reserved_4[19];
uint8_t vp_index;
uint8_t srr_reject_vendor_uniq;
@@ -175,7 +175,7 @@ struct nack_to_isp {
} isp24;
} u;
uint8_t reserved[2];
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
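
The struct rewrites in this header are mechanical: every multi-byte field that crosses the wire in little-endian order becomes __le16/__le32, while opaque byte arrays stay uint8_t. A minimal example of what the annotation buys (demo_iocb is illustrative, not a driver structure):

/* Sketch: with endian-annotated fields, sparse flags any access that
 * skips the conversion helpers. */
struct demo_iocb {
        uint8_t entry_type;
        uint8_t entry_count;
        __le16  nport_handle;
        __le32  exchange_addr;
} __packed;

static u32 demo_exchange(const struct demo_iocb *iocb)
{
        /* returning iocb->exchange_addr directly would trip sparse */
        return le32_to_cpu(iocb->exchange_addr);
}
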
@@ -206,16 +206,16 @@ struct ctio_to_2xxx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
- uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t relative_offset;
- uint32_t residual;
- uint16_t reserved_1[3];
- uint16_t scsi_status;
- uint32_t transfer_length;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 relative_offset;
+ __le32 residual;
+ __le16 reserved_1[3];
+ __le16 scsi_status;
+ __le32 transfer_length;
struct dsd32 dsd[3];
} __packed;
#define ATIO_PATH_INVALID 0x07
@@ -257,7 +257,7 @@ struct fcp_hdr {
uint16_t seq_cnt;
__be16 ox_id;
uint16_t rx_id;
- uint32_t parameter;
+ __le32 parameter;
} __packed;
struct fcp_hdr_le {
@@ -267,12 +267,12 @@ struct fcp_hdr_le {
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t parameter;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 parameter;
} __packed;
#define F_CTL_EXCH_CONTEXT_RESP BIT_23
@@ -306,7 +306,7 @@ struct atio7_fcp_cmnd {
* BUILD_BUG_ON in qlt_init().
*/
uint8_t add_cdb[4];
- /* uint32_t data_length; */
+ /* __le32 data_length; */
} __packed;
/*
@@ -316,31 +316,31 @@ struct atio7_fcp_cmnd {
struct atio_from_isp {
union {
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
uint8_t command_ref;
uint8_t task_codes;
uint8_t task_flags;
uint8_t execution_codes;
uint8_t cdb[MAX_CMDSZ];
- uint32_t data_length;
- uint16_t lun;
+ __le32 data_length;
+ __le16 lun;
uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
- uint16_t reserved_32[6];
- uint16_t ox_id;
+ __le16 reserved_32[6];
+ __le16 ox_id;
} isp2x;
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t fcp_cmnd_len_low;
uint8_t fcp_cmnd_len_high:4;
uint8_t attr:4;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
struct fcp_hdr fcp_hdr;
struct atio7_fcp_cmnd fcp_cmnd;
@@ -352,7 +352,7 @@ struct atio_from_isp {
#define FCP_CMD_LENGTH_MASK 0x0fff
#define FCP_CMD_LENGTH_MIN 0x38
uint8_t data[56];
- uint32_t signature;
+ __le32 signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
@@ -395,36 +395,36 @@ struct ctio7_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t nport_handle;
+ __le16 nport_handle;
#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags;
le_id_t initiator_id;
uint8_t reserved;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
union {
struct {
- uint16_t reserved1;
+ __le16 reserved1;
__le16 flags;
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
- uint32_t relative_offset;
- uint32_t reserved2;
- uint32_t transfer_length;
- uint32_t reserved3;
+ __le16 scsi_status;
+ __le32 relative_offset;
+ __le32 reserved2;
+ __le32 transfer_length;
+ __le32 reserved3;
struct dsd64 dsd;
} status0;
struct {
- uint16_t sense_length;
+ __le16 sense_length;
__le16 flags;
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
- uint16_t response_len;
- uint16_t reserved;
+ __le16 scsi_status;
+ __le16 response_len;
+ __le16 reserved;
uint8_t sense_data[24];
} status1;
} u;
@@ -440,18 +440,18 @@ struct ctio7_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t status;
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 status;
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t reserved1[5];
- uint32_t exchange_address;
- uint16_t reserved2;
- uint16_t flags;
- uint32_t residual;
- uint16_t ox_id;
- uint16_t reserved3;
- uint32_t relative_offset;
+ __le32 exchange_address;
+ __le16 reserved2;
+ __le16 flags;
+ __le32 residual;
+ __le16 ox_id;
+ __le16 reserved3;
+ __le32 relative_offset;
uint8_t reserved4[24];
} __packed;
@@ -489,29 +489,29 @@ struct ctio_crc2_to_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
__le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags; /* additional flags */
#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
le_id_t initiator_id; /* initiator ID */
uint8_t reserved1;
- uint32_t exchange_addr; /* rcv exchange address */
- uint16_t reserved2;
+ __le32 exchange_addr; /* rcv exchange address */
+ __le16 reserved2;
__le16 flags; /* refer to CTIO7 flags values */
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
+ __le16 scsi_status;
__le32 relative_offset;
- uint32_t reserved5;
+ __le32 reserved5;
__le32 transfer_length; /* total fc transfer length */
- uint32_t reserved6;
+ __le32 reserved6;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
- uint16_t reserved_1; /* MUST be set to 0. */
+ __le16 crc_context_len; /* Data segment length. */
+ __le16 reserved_1; /* MUST be set to 0. */
};
/* CTIO Type CRC_x Status IOCB */
@@ -522,20 +522,20 @@ struct ctio_crc_from_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t status;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t reserved1;
- uint16_t state_flags;
+ __le16 status;
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 reserved1;
+ __le16 state_flags;
#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
- uint32_t exchange_address; /* rcv exchange address */
- uint16_t reserved2;
- uint16_t flags;
- uint32_t resid_xfer_length;
- uint16_t ox_id;
+ __le32 exchange_address; /* rcv exchange address */
+ __le16 reserved2;
+ __le16 flags;
+ __le32 resid_xfer_length;
+ __le16 ox_id;
uint8_t reserved3[12];
- uint16_t runt_guard; /* reported runt blk guard */
+ __le16 runt_guard; /* reported runt blk guard */
uint8_t actual_dif[8];
uint8_t expected_dif[8];
} __packed;
@@ -558,29 +558,29 @@ struct abts_recv_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint8_t reserved_1[6];
- uint16_t nport_handle;
+ __le16 nport_handle;
uint8_t reserved_2[2];
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[16];
- uint32_t exchange_addr_to_abort;
+ __le32 exchange_addr_to_abort;
} __packed;
#define ABTS_PARAM_ABORT_SEQ BIT_0
struct ba_acc_le {
- uint16_t reserved;
+ __le16 reserved;
uint8_t seq_id_last;
uint8_t seq_id_valid;
#define SEQ_ID_VALID 0x80
#define SEQ_ID_INVALID 0x00
- uint16_t rx_id;
- uint16_t ox_id;
- uint16_t high_seq_cnt;
- uint16_t low_seq_cnt;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
} __packed;
struct ba_rjt_le {
@@ -604,21 +604,21 @@ struct abts_resp_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t reserved_1;
- uint16_t nport_handle;
- uint16_t control_flags;
+ __le16 reserved_1;
+ __le16 nport_handle;
+ __le16 control_flags;
#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
union {
struct ba_acc_le ba_acct;
struct ba_rjt_le ba_rjt;
} __packed payload;
- uint32_t reserved_4;
- uint32_t exchange_addr_to_abort;
+ __le32 reserved_4;
+ __le32 exchange_addr_to_abort;
} __packed;
/*
@@ -634,21 +634,21 @@ struct abts_resp_from_24xx_fw {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t compl_status;
+ __le16 compl_status;
#define ABTS_RESP_COMPL_SUCCESS 0
#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
- uint16_t nport_handle;
- uint16_t reserved_1;
+ __le16 nport_handle;
+ __le16 reserved_1;
uint8_t reserved_2;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[8];
- uint32_t error_subcode1;
+ __le32 error_subcode1;
#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
- uint32_t error_subcode2;
- uint32_t exchange_addr_to_abort;
+ __le32 error_subcode2;
+ __le32 exchange_addr_to_abort;
} __packed;
/********************************************************************\
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 6aeb1c3fb7a8..8dc82cfd38b2 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -12,6 +12,33 @@
#define IOBASE(vha) IOBAR(ISPREG(vha))
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
+/* hardware_lock assumed held. */
+static void
+qla27xx_write_remote_reg(struct scsi_qla_host *vha,
+ u32 addr, u32 data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd300,
+ "%s: addr/data = %xh/%xh\n", __func__, addr, data);
+
+ wrt_reg_dword(&reg->iobase_addr, 0x40);
+ wrt_reg_dword(&reg->iobase_c4, data);
+ wrt_reg_dword(&reg->iobase_window, addr);
+}
+
+void
+qla27xx_reset_mpi(scsi_qla_host_t *vha)
+{
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
+ "Entered %s.\n", __func__);
+
+ qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
+ qla27xx_write_remote_reg(vha, 0x10405c, 0x4);
+
+ vha->hw->stat.num_mpi_reset++;
+}
+
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
@@ -48,7 +75,7 @@ qla27xx_read8(void __iomem *window, void *buf, ulong *len)
uint8_t value = ~0;
if (buf) {
- value = RD_REG_BYTE(window);
+ value = rd_reg_byte(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -59,7 +86,7 @@ qla27xx_read16(void __iomem *window, void *buf, ulong *len)
uint16_t value = ~0;
if (buf) {
- value = RD_REG_WORD(window);
+ value = rd_reg_word(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -70,7 +97,7 @@ qla27xx_read32(void __iomem *window, void *buf, ulong *len)
uint32_t value = ~0;
if (buf) {
- value = RD_REG_DWORD(window);
+ value = rd_reg_dword(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -99,7 +126,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
if (buf) {
void __iomem *window = (void __iomem *)reg + offset;
- WRT_REG_DWORD(window, data);
+ wrt_reg_dword(window, data);
}
}
@@ -892,9 +919,9 @@ static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
struct qla27xx_fwdt_template *tmp)
{
- tmp->firmware_version[0] = vha->hw->fw_major_version;
- tmp->firmware_version[1] = vha->hw->fw_minor_version;
- tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+ tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version);
+ tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version);
+ tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version);
tmp->firmware_version[3] = cpu_to_le32(
vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
tmp->firmware_version[4] = cpu_to_le32(
@@ -998,14 +1025,65 @@ qla27xx_fwdt_template_valid(void *p)
}
void
-qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
+ bool need_mpi_reset = true;
#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif
+ if (!vha->hw->mpi_fw_dump) {
+ ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
+ } else if (vha->hw->mpi_fw_dumped) {
+ ql_log(ql_log_warn, vha, 0x02f4,
+ "-> MPI firmware already dumped (%p) -- ignoring request\n",
+ vha->hw->mpi_fw_dump);
+ } else {
+ struct fwdt *fwdt = &vha->hw->fwdt[1];
+ ulong len;
+ void *buf = vha->hw->mpi_fw_dump;
+
+ ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0x02f6,
+ "-> fwdt1 no template\n");
+ goto bailout;
+ }
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0x02f7,
+ "-> fwdt1 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ } else {
+ need_mpi_reset = false;
+ }
+
+ vha->hw->mpi_fw_dump_len = len;
+ vha->hw->mpi_fw_dumped = 1;
+
+ ql_log(ql_log_warn, vha, 0x02f8,
+ "-> MPI firmware dump saved to buffer (%lu/%p)\n",
+ vha->host_no, vha->hw->mpi_fw_dump);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
+
+bailout:
+ if (need_mpi_reset)
+ qla27xx_reset_mpi(vha);
+#ifndef __CHECKER__
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha)
+{
+ lockdep_assert_held(&vha->hw->hardware_lock);
if (!vha->hw->fw_dump) {
ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
@@ -1015,42 +1093,30 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
vha->hw->fw_dump);
} else {
struct fwdt *fwdt = vha->hw->fwdt;
- uint j;
ulong len;
void *buf = vha->hw->fw_dump;
- uint count = vha->hw->fw_dump_mpi ? 2 : 1;
-
- for (j = 0; j < count; j++, fwdt++, buf += len) {
- ql_log(ql_log_warn, vha, 0xd011,
- "-> fwdt%u running...\n", j);
- if (!fwdt->template) {
- ql_log(ql_log_warn, vha, 0xd012,
- "-> fwdt%u no template\n", j);
- break;
- }
- len = qla27xx_execute_fwdt_template(vha,
- fwdt->template, buf);
- if (len == 0) {
- goto bailout;
- } else if (len != fwdt->dump_size) {
- ql_log(ql_log_warn, vha, 0xd013,
- "-> fwdt%u fwdump residual=%+ld\n",
- j, fwdt->dump_size - len);
- }
+
+ ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "-> fwdt0 no template\n");
+ return;
}
- vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
- vha->hw->fw_dumped = 1;
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ return;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "-> fwdt0 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ }
+
+ vha->hw->fw_dump_len = len;
+ vha->hw->fw_dumped = true;
ql_log(ql_log_warn, vha, 0xd015,
"-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
-
-bailout:
- vha->hw->fw_dump_mpi = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-#endif
}
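
Dropping the hardware_locked parameter moves the locking contract into lockdep: callers of qla27xx_fwdump() must now hold hardware_lock themselves, and debug kernels verify it. The guard in generic form:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Sketch: assert a caller-provided lock instead of conditionally
 * taking it; under CONFIG_LOCKDEP a violation splats immediately. */
static void dump_under_lock(spinlock_t *hw_lock)
{
        lockdep_assert_held(hw_lock);
        /* ... walk hardware state that hw_lock protects ... */
}
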
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index bba8dc90acfb..89280b3477aa 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -27,7 +27,7 @@ struct __packed qla27xx_fwdt_template {
uint32_t saved_state[16];
uint32_t reserved_3[8];
- uint32_t firmware_version[5];
+ __le32 firmware_version[5];
};
#define TEMPLATE_TYPE_FWDUMP 99
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 1f0a185b2a95..68183a96a417 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -949,6 +949,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1111,6 +1112,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1958,6 +1960,20 @@ static int __init tcm_qla2xxx_init(void)
{
int ret;
+ BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32);
+ BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64);
+ BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12);
+ BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4);
+ BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
+ BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24);
+ BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64);
+
ret = tcm_qla2xxx_register_configfs();
if (ret < 0)
return ret;
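
The BUILD_BUG_ON() block added to tcm_qla2xxx_init() pins every IOCB layout at its wire size, so a mistyped field (say, a __le16 widened to __le32) fails the build instead of corrupting traffic. The same guard in isolation:

#include <linux/bug.h>

/* Sketch: compile-time check that a wire structure still matches the
 * 64-byte IOCB slot the firmware expects. */
static int __init demo_init(void)
{
        BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
        return 0;
}
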
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5504ab11decc..5dc697ce8b5d 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -966,7 +966,7 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
"%s: No such sysfs attribute\n", __func__);
rc = -ENOSYS;
goto exit_set_chap;
- };
+ }
}
if (chap_rec.chap_type == CHAP_TYPE_IN)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4c6c448dc2df..843cccb38cb7 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7,7 +7,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2018 Douglas Gilbert
+ * Copyright (C) 2001 - 2020 Douglas Gilbert
*
* For documentation see http://sg.danny.cz/sg/sdebug26.html
*/
@@ -39,6 +39,9 @@
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
+#include <linux/random.h>
+#include <linux/xarray.h>
+#include <linux/prefetch.h>
#include <net/checksum.h>
@@ -57,8 +60,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20190125";
+#define SDEBUG_VERSION "0189" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20200421";
#define MY_NAME "scsi_debug"
@@ -91,6 +94,11 @@ static const char *sdebug_version_date = "20190125";
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
+#define UNALIGNED_WRITE_ASCQ 0x4
+#define WRITE_BOUNDARY_ASCQ 0x5
+#define READ_INVDATA_ASCQ 0x6
+#define READ_BOUNDARY_ASCQ 0x7
+#define INSUFF_ZONE_ASCQ 0xe
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,9 +113,12 @@ static const char *sdebug_version_date = "20190125";
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
+#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
+#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
+#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
@@ -126,6 +137,7 @@ static const char *sdebug_version_date = "20190125";
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
+#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
@@ -142,6 +154,11 @@ static const char *sdebug_version_date = "20190125";
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
+/* Default parameters for ZBC drives */
+#define DEF_ZBC_ZONE_SIZE_MB 128
+#define DEF_ZBC_MAX_OPEN_ZONES 8
+#define DEF_ZBC_NR_CONV_ZONES 1
+
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -219,21 +236,23 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
-#define F_D_IN 1
-#define F_D_OUT 2
+/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
+#define F_D_IN 1 /* Data-in command (e.g. READ) */
+#define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
-#define F_RL_WLUN_OK 0x10
-#define F_SKIP_UA 0x20
-#define F_DELAY_OVERR 0x40
-#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
-#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
-#define F_INV_OP 0x200
-#define F_FAKE_RW 0x400
-#define F_M_ACCESS 0x800 /* media access */
-#define F_SSU_DELAY 0x1000
-#define F_SYNC_DELAY 0x2000
-
+#define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
+#define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
+#define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
+#define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
+#define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
+#define F_INV_OP 0x200 /* invalid opcode (not supported) */
+#define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
+#define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
+#define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
+#define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
+
+/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
@@ -243,6 +262,35 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_MAX_CMD_LEN 32
+#define SDEB_XA_NOT_IN_USE XA_MARK_1
+
+/* Zone types (zbcr05 table 25) */
+enum sdebug_z_type {
+ ZBC_ZONE_TYPE_CNV = 0x1,
+ ZBC_ZONE_TYPE_SWR = 0x2,
+ ZBC_ZONE_TYPE_SWP = 0x3,
+};
+
+/* enumeration names taken from table 26, zbcr05 */
+enum sdebug_z_cond {
+ ZBC_NOT_WRITE_POINTER = 0x0,
+ ZC1_EMPTY = 0x1,
+ ZC2_IMPLICIT_OPEN = 0x2,
+ ZC3_EXPLICIT_OPEN = 0x3,
+ ZC4_CLOSED = 0x4,
+ ZC6_READ_ONLY = 0xd,
+ ZC5_FULL = 0xe,
+ ZC7_OFFLINE = 0xf,
+};
+
+struct sdeb_zone_state { /* ZBC: per zone state */
+ enum sdebug_z_type z_type;
+ enum sdebug_z_cond z_cond;
+ bool z_non_seq_resource;
+ unsigned int z_size;
+ sector_t z_start;
+ sector_t z_wp;
+};
struct sdebug_dev_info {
struct list_head dev_list;
@@ -255,15 +303,36 @@ struct sdebug_dev_info {
atomic_t num_in_q;
atomic_t stopped;
bool used;
+
+ /* For ZBC devices */
+ enum blk_zoned_model zmodel;
+ unsigned int zsize;
+ unsigned int zsize_shift;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int nr_imp_open;
+ unsigned int nr_exp_open;
+ unsigned int nr_closed;
+ unsigned int max_open;
+ struct sdeb_zone_state *zstate;
};
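
With zsize_shift cached per device, mapping an LBA to its zone record is a single shift. A sketch of the kind of helper the ZBC emulation needs (name assumed; not shown in this hunk):

/* Sketch: a power-of-two zone size lets zone lookup avoid a divide. */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
                                        sector_t lba)
{
        return &devip->zstate[lba >> devip->zsize_shift];
}
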
struct sdebug_host_info {
struct list_head host_list;
+ int si_idx; /* sdeb_store_info (per host) xarray index */
struct Scsi_Host *shost;
struct device dev;
struct list_head dev_info_list;
};
+/* There is an xarray of pointers to this struct's objects, one per host */
+struct sdeb_store_info {
+ rwlock_t macc_lck; /* for atomic media access on this store */
+ u8 *storep; /* user data storage (ram) */
+ struct t10_pi_tuple *dif_storep; /* protection info */
+ void *map_storep; /* provisioning map */
+};
+
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
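
si_idx decouples a host from its backing store: several hosts may share one sdeb_store_info, all reached through a module-wide xarray. A sketch of that arrangement (per_store_arr and store_by_idx are assumed here; only SDEB_XA_NOT_IN_USE appears in this hunk):

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(per_store_arr, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

/* Sketch: resolve a host's store by index; entries marked
 * SDEB_XA_NOT_IN_USE are parked, awaiting reuse or teardown. */
static struct sdeb_store_info *store_by_idx(int si_idx)
{
        return xa_load(&per_store_arr, si_idx);
}
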
@@ -339,7 +408,7 @@ enum sdeb_opcode_index {
SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
- SDEB_I_VERIFY = 16, /* 10 only */
+ SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
@@ -352,7 +421,10 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 26, /* 10, 16 */
SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
SDEB_I_COMP_WRITE = 28,
- SDEB_I_LAST_ELEMENT = 29, /* keep this last (previous + 1) */
+ SDEB_I_PRE_FETCH = 29, /* 10, 16 */
+ SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
+ SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
+ SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
};
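
The two-level dispatch is unchanged by the renumbering: opcode_ind_arr squeezes the 256 SCSI opcodes down to SDEB_I_* slots, and opcode_info_arr supplies the flags plus the resp_*() handler for each slot. In outline:

/* Sketch: first-level lookup from raw opcode to table slot; service
 * actions are resolved afterwards via the slot's overflow array. */
static const struct opcode_info_t *slot_for_opcode(u8 opcode)
{
        return &opcode_info_arr[opcode_ind_arr[opcode]];
}
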
@@ -368,7 +440,7 @@ static const unsigned char opcode_ind_arr[256] = {
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
- 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
+ 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
@@ -382,8 +454,10 @@ static const unsigned char opcode_ind_arr[256] = {
0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
- SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
+ SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
+ 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
+ SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
@@ -424,11 +498,25 @@ static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+
+static int sdebug_do_add_host(bool mk_new_store);
+static int sdebug_add_host_helper(int per_host_idx);
+static void sdebug_do_remove_host(bool the_end);
+static int sdebug_add_store(void);
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
+static void sdebug_erase_all_stores(bool apart_from_first);
/*
* The following are overflow arrays for cdbs that "hit" the same index in
@@ -468,6 +556,12 @@ static const struct opcode_info_t write_iarr[] = {
0xbf, 0xc7, 0, 0, 0, 0} },
};
+static const struct opcode_info_t verify_iarr[] = {
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(10) */
+ NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+};
+
static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -514,11 +608,35 @@ static const struct opcode_info_t sync_cache_iarr[] = {
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
+static const struct opcode_info_t pre_fetch_iarr[] = {
+ {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
+};
+
+static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
+ {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
+ {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
+ {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
+ {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
+ {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
+};
+
+static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
+ {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
+ {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
+};
+
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
* plus the terminating elements for logic that scans this table such as
* REPORT SUPPORTED OPERATION CODES. */
-static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
+static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -568,9 +686,10 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(verify_iarr), 0x8f, 0,
+ F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
+ verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
{32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
@@ -609,17 +728,31 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
-
-/* 29 */
+ {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
+ resp_pre_fetch, pre_fetch_iarr,
+ {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* PRE-FETCH (10) */
+
+/* 30 */
+ {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
+ resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
+ {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
+ {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
+ resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
+ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
+/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static int sdebug_add_host = DEF_NUM_HOST;
+static int sdebug_num_hosts;
+static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
-static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
@@ -656,6 +789,8 @@ static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
+static bool sdebug_random = DEF_RANDOM;
+static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
@@ -666,6 +801,9 @@ static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
+/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
+static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
+static char *sdeb_zbc_model_s;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -679,9 +817,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);
-static unsigned char *fake_storep; /* ramdisk storage */
-static struct t10_pi_tuple *dif_storep; /* protection info */
-static void *map_storep; /* provisioning map */
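+/*
+ * The former global ramdisk, protection-info and provisioning-map pointers
+ * now live in sdeb_store_info objects held in an xarray, one per store.
+ */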
+static struct xarray per_store_arr;
+static struct xarray *per_store_ap = &per_store_arr;
+static int sdeb_first_idx = -1; /* invalid index ==> none created */
+static int sdeb_most_recent_idx = -1;
+static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
static unsigned long map_size;
static int num_aborts;
@@ -693,10 +833,19 @@ static int dix_writes;
static int dix_reads;
static int dif_errors;
+/* ZBC global data */
+static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
+static int sdeb_zbc_zone_size_mb;
+static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
+static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
+
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
+static DEFINE_RWLOCK(atomic_rw2);
+
+static rwlock_t *ramdisk_lck_a[2];
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
@@ -717,6 +866,8 @@ static const int illegal_condition_result =
static const int device_qfull_result =
(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
+static const int condition_met_result = SAM_STAT_CONDITION_MET;
+
/* Only do the extra work involved in logical block provisioning if one or
* more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
@@ -728,18 +879,25 @@ static inline bool scsi_debug_lbp(void)
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
-static void *lba2fake_store(unsigned long long lba)
+static void *lba2fake_store(struct sdeb_store_info *sip,
+ unsigned long long lba)
{
- lba = do_div(lba, sdebug_store_sectors);
+ struct sdeb_store_info *lsip = sip;
- return fake_storep + lba * sdebug_sector_size;
+ lba = do_div(lba, sdebug_store_sectors);
+ if (!sip || !sip->storep) {
+ WARN_ON_ONCE(true);
+ lsip = xa_load(per_store_ap, 0); /* should never be NULL */
+ }
+ return lsip->storep + lba * sdebug_sector_size;
}
-static struct t10_pi_tuple *dif_store(sector_t sector)
+static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
+ sector_t sector)
{
sector = sector_div(sector, sdebug_store_sectors);
- return dif_storep + sector;
+ return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
@@ -1041,7 +1199,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
n = scsi_bufflen(scp) - (off_dst + act_len);
- scsi_set_resid(scp, min(scsi_get_resid(scp), n));
+ scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
return 0;
}
@@ -1354,13 +1512,15 @@ static int inquiry_vpd_b0(unsigned char *arr)
}
/* Block device characteristics VPD page (SBC-3) */
-static int inquiry_vpd_b1(unsigned char *arr)
+static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
memset(arr, 0, 0x3c);
arr[0] = 0;
arr[1] = 1; /* non rotating medium (e.g. solid state) */
arr[2] = 0;
arr[3] = 5; /* less than 1.8" */
+ if (devip->zmodel == BLK_ZONED_HA)
+ arr[4] = 1 << 4; /* zoned field = 01b */
return 0x3c;
}
@@ -1384,6 +1544,26 @@ static int inquiry_vpd_b2(unsigned char *arr)
return 0x4;
}
+/* Zoned block device characteristics VPD page (ZBC mandatory) */
+static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
+{
+ memset(arr, 0, 0x3c);
+ arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
+ /*
+ * Set Optimal number of open sequential write preferred zones and
+ * Optimal number of non-sequentially written sequential write
+ * preferred zones fields to 'not reported' (0xffffffff). Leave other
+ * fields set to zero, apart from Max. number of open swrz_s field.
+ */
+ put_unaligned_be32(0xffffffff, &arr[4]);
+ put_unaligned_be32(0xffffffff, &arr[8]);
+ if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
+ put_unaligned_be32(devip->max_open, &arr[12]);
+ else
+ put_unaligned_be32(0xffffffff, &arr[12]);
+ return 0x3c;
+}
+
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -1393,13 +1573,15 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char *arr;
unsigned char *cmd = scp->cmnd;
int alloc_len, n, ret;
- bool have_wlun, is_disk;
+ bool have_wlun, is_disk, is_zbc, is_disk_zbc;
alloc_len = get_unaligned_be16(cmd + 3);
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
is_disk = (sdebug_ptype == TYPE_DISK);
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
if (have_wlun)
pq_pdt = TYPE_WLUN; /* present, wlun */
@@ -1437,11 +1619,14 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0x86; /* extended inquiry */
arr[n++] = 0x87; /* mode page policy */
arr[n++] = 0x88; /* SCSI ports */
- if (is_disk) { /* SBC only */
+ if (is_disk_zbc) { /* SBC or ZBC */
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits */
arr[n++] = 0xb1; /* Block characteristics */
- arr[n++] = 0xb2; /* Logical Block Prov */
+ if (is_disk)
+ arr[n++] = 0xb2; /* LB Provisioning */
+ if (is_zbc)
+ arr[n++] = 0xb6; /* ZB dev. char. */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
@@ -1480,19 +1665,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (0x88 == cmd[2]) { /* SCSI Ports */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
- } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
+ } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
- } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
+ } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
- } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
+ } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_vpd_b1(&arr[4]);
+ arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
+ } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_vpd_b6(devip, &arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
@@ -1530,10 +1718,13 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
put_unaligned_be16(0x525, arr + n);
n += 2;
+ } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
+ put_unaligned_be16(0x624, arr + n);
+ n += 2;
}
put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
ret = fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_LONG_INQ_SZ));
+ min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
kfree(arr);
return ret;
}
@@ -1688,7 +1879,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
}
return fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
+ min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
@@ -1762,9 +1953,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
* - The constructed command length
* - The maximum array size
*/
- rlen = min(alen,n);
+ rlen = min_t(int, alen, n);
ret = fill_from_dev_buffer(scp, arr,
- min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
+ min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
kfree(arr);
return ret;
}
@@ -2119,7 +2310,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -2128,7 +2319,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
- if (is_disk && !dbd)
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ if ((is_disk || is_zbc) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2140,8 +2332,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
(devip->target * 1000) - 3;
- /* for disks set DPOFUA bit and clear write protect (WP) bit */
- if (is_disk) {
+ /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
+ if (is_disk || is_zbc) {
dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
if (sdebug_wp)
dev_spec |= 0x80;
@@ -2201,7 +2393,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
bad_pcode = true;
break;
case 0x8: /* Caching page, direct access */
- if (is_disk) {
+ if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
} else
@@ -2239,6 +2431,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
target);
len += resp_caching_pg(ap + len, pcontrol,
target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol,
+ target);
}
len += resp_ctrl_m_pg(ap + len, pcontrol, target);
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2266,7 +2461,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
- return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
+ return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -2451,14 +2646,217 @@ static int resp_log_sense(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
- len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
+ len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
return fill_from_dev_buffer(scp, arr,
- min(len, SDEBUG_MAX_INQ_ARR_SZ));
+ min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
+}
+
+static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
+{
+ return devip->nr_zones != 0;
+}
+
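+/* Zone sizes are a power of 2, so the containing zone is found by a shift */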
+static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
+ unsigned long long lba)
+{
+ return &devip->zstate[lba >> devip->zsize_shift];
+}
+
+static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
+{
+ return zsp->z_type == ZBC_ZONE_TYPE_CNV;
+}
+
+static void zbc_close_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
+ return;
+
+ if (zc == ZC2_IMPLICIT_OPEN)
+ devip->nr_imp_open--;
+ else
+ devip->nr_exp_open--;
+
+ if (zsp->z_wp == zsp->z_start) {
+ zsp->z_cond = ZC1_EMPTY;
+ } else {
+ zsp->z_cond = ZC4_CLOSED;
+ devip->nr_closed++;
+ }
+}
+
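+/* Close the first implicitly open zone found, to make room for another */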
+static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
+ zbc_close_zone(devip, zsp);
+ return;
+ }
+ }
+}
+
+static void zbc_open_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool explicit)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
+ (!explicit && zc == ZC2_IMPLICIT_OPEN))
+ return;
+
+ /* Close an implicit open zone if necessary */
+ if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ else if (devip->max_open &&
+ devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
+ zbc_close_imp_open_zone(devip);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ if (explicit) {
+ zsp->z_cond = ZC3_EXPLICIT_OPEN;
+ devip->nr_exp_open++;
+ } else {
+ zsp->z_cond = ZC2_IMPLICIT_OPEN;
+ devip->nr_imp_open++;
+ }
+}
+
+static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ unsigned long long lba, unsigned int num)
+{
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ zsp->z_wp += num;
+ if (zsp->z_wp >= zend)
+ zsp->z_cond = ZC5_FULL;
+ return;
+ }
+
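+ /*
+ * Sequential write preferred zone: writes may land anywhere, so walk
+ * each zone touched, flag non-sequential use, and only advance a
+ * zone's write pointer when the write extends beyond it.
+ */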
+ while (num) {
+ if (lba != zsp->z_wp)
+ zsp->z_non_seq_resource = true;
+
+ end = lba + num;
+ if (end >= zend) {
+ n = zend - lba;
+ zsp->z_wp = zend;
+ } else if (end > zsp->z_wp) {
+ n = num;
+ zsp->z_wp = end;
+ } else {
+ n = num;
+ }
+ if (zsp->z_wp >= zend)
+ zsp->z_cond = ZC5_FULL;
+
+ num -= n;
+ lba += n;
+ if (num) {
+ zsp++;
+ zend = zsp->z_start + zsp->z_size;
+ }
+ }
}
-static inline int check_device_access_params(struct scsi_cmnd *scp,
- unsigned long long lba, unsigned int num, bool write)
+static int check_zbc_access_params(struct scsi_cmnd *scp,
+ unsigned long long lba, unsigned int num, bool write)
{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
+
+ if (!write) {
+ if (devip->zmodel == BLK_ZONED_HA)
+ return 0;
+ /* For host-managed, reads cannot cross zone type boundaries */
+ if (zsp_end != zsp &&
+ zbc_zone_is_conv(zsp) &&
+ !zbc_zone_is_conv(zsp_end)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ READ_INVDATA_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ /* No restrictions for writes within conventional zones */
+ if (zbc_zone_is_conv(zsp)) {
+ if (!zbc_zone_is_conv(zsp_end)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ /* Writes cannot cross sequential zone boundaries */
+ if (zsp_end != zsp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ /* Cannot write full zones */
+ if (zsp->z_cond == ZC5_FULL) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+ /* Writes must be aligned to the zone WP */
+ if (lba != zsp->z_wp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ UNALIGNED_WRITE_ASCQ);
+ return check_condition_result;
+ }
+ }
+
+ /* Handle implicit open of closed and empty zones */
+ if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
+ if (devip->max_open &&
+ devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT,
+ INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ return check_condition_result;
+ }
+ zbc_open_zone(devip, zsp, false);
+ }
+
+ return 0;
+}
+
+static inline int check_device_access_params(struct scsi_cmnd *scp,
+ unsigned long long lba, unsigned int num, bool write)
+{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+
if (lba + num > sdebug_capacity) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
@@ -2473,17 +2871,37 @@ static inline int check_device_access_params(struct scsi_cmnd *scp,
mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
return check_condition_result;
}
+ if (sdebug_dev_is_zoned(devip))
+ return check_zbc_access_params(scp, lba, num, write);
+
return 0;
}
+/*
+ * Note: if BUG_ON() fires it usually indicates a problem with the parser
+ * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
+ * that access any of the "stores" in struct sdeb_store_info should call this
+ * function with bug_if_fake_rw set to true.
+ */
+static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
+ bool bug_if_fake_rw)
+{
+ if (sdebug_fake_rw) {
+ BUG_ON(bug_if_fake_rw); /* See note above */
+ return NULL;
+ }
+ return xa_load(per_store_ap, devip->sdbg_host->si_idx);
+}
+
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
- u32 num, bool do_write)
+static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
+ u32 sg_skip, u64 lba, u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
- struct scsi_data_buffer *sdb = &scmd->sdb;
enum dma_data_direction dir;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ u8 *fsp;
if (do_write) {
dir = DMA_TO_DEVICE;
@@ -2492,24 +2910,25 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
dir = DMA_FROM_DEVICE;
}
- if (!sdb->length)
+ if (!sdb->length || !sip)
return 0;
- if (scmd->sc_data_direction != dir)
+ if (scp->sc_data_direction != dir)
return -1;
+ fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
if (block + num > sdebug_store_sectors)
rest = block + num - sdebug_store_sectors;
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep + (block * sdebug_sector_size),
+ fsp + (block * sdebug_sector_size),
(num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep, rest * sdebug_sector_size,
+ fsp, rest * sdebug_sector_size,
sg_skip + ((num - rest) * sdebug_sector_size),
do_write);
}
@@ -2517,34 +2936,49 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
return ret;
}
-/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into lba2fake_store(lba,num) and return true. If comparison fails then
+/* Returns number of bytes copied or -1 if error. */
+static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
+{
+ struct scsi_data_buffer *sdb = &scp->sdb;
+
+ if (!sdb->length)
+ return 0;
+ if (scp->sc_data_direction != DMA_TO_DEVICE)
+ return -1;
+ return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
+ num * sdebug_sector_size, 0, true);
+}
+
+/* If sip->storep+lba compares equal to arr(num), then copy top half of
+ * arr into sip->storep+lba and return true. If comparison fails then
+ * return false. If compare_only is true then don't copy on an equal
+ * comparison, just return true. */
-static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
+static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
+ const u8 *arr, bool compare_only)
{
bool res;
u64 block, rest = 0;
u32 store_blks = sdebug_store_sectors;
u32 lb_size = sdebug_sector_size;
+ u8 *fsp = sip->storep;
block = do_div(lba, store_blks);
if (block + num > store_blks)
rest = block + num - store_blks;
- res = !memcmp(fake_storep + (block * lb_size), arr,
- (num - rest) * lb_size);
+ res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (!res)
return res;
if (rest)
- res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
+ res = memcmp(fsp, arr + ((num - rest) * lb_size),
rest * lb_size);
if (!res)
return res;
+ if (compare_only)
+ return true;
arr += num * lb_size;
- memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
+ memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (rest)
- memcpy(fake_storep, arr + ((num - rest) * lb_size),
- rest * lb_size);
+ memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
return res;
}
@@ -2587,24 +3021,27 @@ static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
return 0;
}
-static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
unsigned int sectors, bool read)
{
size_t resid;
void *paddr;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *dif_storep = sip->dif_storep;
const void *dif_store_end = dif_storep + sdebug_store_sectors;
struct sg_mapping_iter miter;
/* Bytes of protection data to copy into sgl */
resid = sectors * sizeof(*dif_storep);
- sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
- scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
- (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+ sg_miter_start(&miter, scsi_prot_sglist(scp),
+ scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
+ (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
while (sg_miter_next(&miter) && resid > 0) {
- size_t len = min(miter.length, resid);
- void *start = dif_store(sector);
+ size_t len = min_t(size_t, miter.length, resid);
+ void *start = dif_store(sip, sector);
size_t rest = 0;
if (dif_store_end < start + len)
@@ -2630,30 +3067,33 @@ static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
sg_miter_stop(&miter);
}
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
unsigned int i;
- struct t10_pi_tuple *sdt;
sector_t sector;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *sdt;
for (i = 0; i < sectors; i++, ei_lba++) {
int ret;
sector = start_sec + i;
- sdt = dif_store(sector);
+ sdt = dif_store(sip, sector);
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
+ ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
+ ei_lba);
if (ret) {
dif_errors++;
return ret;
}
}
- dif_copy_prot(SCpnt, start_sec, sectors, true);
+ dif_copy_prot(scp, start_sec, sectors, true);
dix_reads++;
return 0;
@@ -2661,14 +3101,15 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- struct sdebug_queued_cmd *sqcp;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+ u8 *cmd = scp->cmnd;
+ struct sdebug_queued_cmd *sqcp;
switch (cmd[0]) {
case READ_16:
@@ -2750,21 +3191,21 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
- read_lock_irqsave(&atomic_rw, iflags);
+ read_lock(macc_lckp);
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
if (prot_ret) {
- read_unlock_irqrestore(&atomic_rw, iflags);
+ read_unlock(macc_lckp);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(scp, 0, lba, num, false);
- read_unlock_irqrestore(&atomic_rw, iflags);
+ ret = do_device_access(sip, scp, 0, lba, num, false);
+ read_unlock(macc_lckp);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2902,7 +3343,8 @@ static sector_t map_index_to_lba(unsigned long index)
return lba;
}
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int *num)
{
sector_t end;
unsigned int mapped;
@@ -2910,19 +3352,20 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
unsigned long next;
index = lba_to_map_index(lba);
- mapped = test_bit(index, map_storep);
+ mapped = test_bit(index, sip->map_storep);
if (mapped)
- next = find_next_zero_bit(map_storep, map_size, index);
+ next = find_next_zero_bit(sip->map_storep, map_size, index);
else
- next = find_next_bit(map_storep, map_size, index);
+ next = find_next_bit(sip->map_storep, map_size, index);
end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
*num = end - lba;
return mapped;
}
-static void map_region(sector_t lba, unsigned int len)
+static void map_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
@@ -2930,15 +3373,17 @@ static void map_region(sector_t lba, unsigned int len)
unsigned long index = lba_to_map_index(lba);
if (index < map_size)
- set_bit(index, map_storep);
+ set_bit(index, sip->map_storep);
lba = map_index_to_lba(index + 1);
}
}
-static void unmap_region(sector_t lba, unsigned int len)
+static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
+ u8 *fsp = sip->storep;
while (lba < end) {
unsigned long index = lba_to_map_index(lba);
@@ -2946,17 +3391,16 @@ static void unmap_region(sector_t lba, unsigned int len)
if (lba == map_index_to_lba(index) &&
lba + sdebug_unmap_granularity <= end &&
index < map_size) {
- clear_bit(index, map_storep);
+ clear_bit(index, sip->map_storep);
if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
- memset(fake_storep +
- lba * sdebug_sector_size,
+ memset(fsp + lba * sdebug_sector_size,
(sdebug_lbprz & 1) ? 0 : 0xff,
sdebug_sector_size *
sdebug_unmap_granularity);
}
- if (dif_storep) {
- memset(dif_storep + lba, 0xff,
- sizeof(*dif_storep) *
+ if (sip->dif_storep) {
+ memset(sip->dif_storep + lba, 0xff,
+ sizeof(*sip->dif_storep) *
sdebug_unmap_granularity);
}
}
@@ -2966,13 +3410,14 @@ static void unmap_region(sector_t lba, unsigned int len)
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+ u8 *cmd = scp->cmnd;
switch (cmd[0]) {
case WRITE_16:
@@ -3025,26 +3470,32 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
}
+
+ write_lock(macc_lckp);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ write_unlock(macc_lckp);
return ret;
- write_lock_irqsave(&atomic_rw, iflags);
+ }
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
if (prot_ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
- write_unlock_irqrestore(&atomic_rw, iflags);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
+ write_unlock(macc_lckp);
if (unlikely(-1 == ret))
return DID_ERROR << 16;
else if (unlikely(sdebug_verbose &&
@@ -3085,13 +3536,14 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u8 *lrdp = NULL;
u8 *up;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u8 wrprotect;
u16 lbdof, num_lrd, k;
u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
- unsigned long iflags;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
@@ -3153,7 +3605,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
goto err_out;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
sg_off = lbdof_blen;
/* Spec says Buffer xfer Length field in number of LBs in dout */
cum_lb = 0;
@@ -3196,9 +3648,12 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
+ map_region(sip, lba, num);
if (unlikely(-1 == ret)) {
ret = DID_ERROR << 16;
goto err_out_unlock;
@@ -3236,7 +3691,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
ret = 0;
err_out_unlock:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
err_out:
kfree(lrdp);
return ret;
@@ -3245,27 +3700,35 @@ err_out:
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
- int ret;
- unsigned long iflags;
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
unsigned long long i;
- u32 lb_size = sdebug_sector_size;
u64 block, lbaa;
+ u32 lb_size = sdebug_sector_size;
+ int ret;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u8 *fs1p;
+ u8 *fsp;
+
+ write_lock(macc_lckp);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ write_unlock(macc_lckp);
return ret;
-
- write_lock_irqsave(&atomic_rw, iflags);
+ }
if (unmap && scsi_debug_lbp()) {
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
goto out;
}
lbaa = lba;
block = do_div(lbaa, sdebug_store_sectors);
/* if ndob then zero 1 logical block, else fetch 1 logical block */
- fs1p = fake_storep + (block * lb_size);
+ fsp = sip->storep;
+ fs1p = fsp + (block * lb_size);
if (ndob) {
memset(fs1p, 0, lb_size);
ret = 0;
@@ -3273,7 +3736,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
return DID_ERROR << 16;
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
@@ -3284,12 +3747,15 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
for (i = 1 ; i < num ; i++) {
lbaa = lba + i;
block = do_div(lbaa, sdebug_store_sectors);
- memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+ memmove(fsp + (block * lb_size), fs1p, lb_size);
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
return 0;
}
@@ -3401,12 +3867,12 @@ static int resp_comp_write(struct scsi_cmnd *scp,
{
u8 *cmd = scp->cmnd;
u8 *arr;
- u8 *fake_storep_hold;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u64 lba;
u32 dnum;
u32 lb_size = sdebug_sector_size;
u8 num;
- unsigned long iflags;
int ret;
int retval = 0;
@@ -3435,14 +3901,9 @@ static int resp_comp_write(struct scsi_cmnd *scp,
return check_condition_result;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
- /* trick do_device_access() to fetch both compare and write buffers
- * from data-in into arr. Safe (atomic) since write_lock held. */
- fake_storep_hold = fake_storep;
- fake_storep = arr;
- ret = do_device_access(scp, 0, 0, dnum, true);
- fake_storep = fake_storep_hold;
+ ret = do_dout_fetch(scp, dnum, arr);
if (ret == -1) {
retval = DID_ERROR << 16;
goto cleanup;
@@ -3450,15 +3911,15 @@ static int resp_comp_write(struct scsi_cmnd *scp,
sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
"indicated=%u, IO sent=%d bytes\n", my_name,
dnum * lb_size, ret);
- if (!comp_write_worker(lba, num, arr)) {
+ if (!comp_write_worker(sip, lba, num, arr, false)) {
mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
retval = check_condition_result;
goto cleanup;
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
cleanup:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
kfree(arr);
return retval;
}
@@ -3473,10 +3934,10 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
unsigned char *buf;
struct unmap_block_desc *desc;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
unsigned int i, payload_len, descriptors;
int ret;
- unsigned long iflags;
-
if (!scsi_debug_lbp())
return 0; /* fib and say its done */
@@ -3503,7 +3964,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
desc = (void *)&buf[8];
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -3513,13 +3974,13 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (ret)
goto out;
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
}
ret = 0;
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
kfree(buf);
return ret;
@@ -3533,8 +3994,8 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u64 lba;
u32 alloc_len, mapped, num;
- u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
int ret;
+ u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
lba = get_unaligned_be64(cmd + 2);
alloc_len = get_unaligned_be32(cmd + 10);
@@ -3546,9 +4007,11 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
if (ret)
return ret;
- if (scsi_debug_lbp())
- mapped = map_state(lba, &num);
- else {
+ if (scsi_debug_lbp()) {
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+
+ mapped = map_state(sip, lba, &num);
+ } else {
mapped = 1;
/* following just in case virtual_gb changed */
sdebug_capacity = get_sdebug_capacity();
@@ -3593,6 +4056,56 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
return res;
}
+/*
+ * Assuming the LBA+num_blocks is not out-of-range, this function returns
+ * CONDITION MET if the specified blocks will fit (or have already fitted)
+ * in the cache, and a GOOD status otherwise. A disk with a big cache is
+ * modelled, so CONDITION MET is always yielded. As a side effect it tries
+ * to bring the backing range of main memory into the cache associated
+ * with the CPU(s).
+ */
+static int resp_pre_fetch(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 lba;
+ u64 block, rest = 0;
+ u32 nblks;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+ u8 *fsp = sip->storep;
+
+ if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
+ lba = get_unaligned_be32(cmd + 2);
+ nblks = get_unaligned_be16(cmd + 7);
+ } else { /* PRE-FETCH(16) */
+ lba = get_unaligned_be64(cmd + 2);
+ nblks = get_unaligned_be32(cmd + 10);
+ }
+ if (lba + nblks > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ if (!fsp)
+ goto fini;
+ /* PRE-FETCH spec says nothing about LBP or PI so skip them */
+ block = do_div(lba, sdebug_store_sectors);
+ if (block + nblks > sdebug_store_sectors)
+ rest = block + nblks - sdebug_store_sectors;
+
+ /* Try to bring the PRE-FETCH range into CPU's cache */
+ read_lock(macc_lckp);
+ prefetch_range(fsp + (sdebug_sector_size * block),
+ (nblks - rest) * sdebug_sector_size);
+ if (rest)
+ prefetch_range(fsp, rest * sdebug_sector_size);
+ read_unlock(macc_lckp);
+fini:
+ if (cmd[1] & 0x2)
+ res = SDEG_RES_IMMED_MASK;
+ return res | condition_met_result;
+}
+
#define RL_BUCKET_ELEMS 8
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
@@ -3694,6 +4207,504 @@ static int resp_report_luns(struct scsi_cmnd *scp,
return res;
}
+static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ bool is_bytchk3 = false;
+ u8 bytchk;
+ int ret, j;
+ u32 vnum, a_num, off;
+ const u32 lb_size = sdebug_sector_size;
+ u64 lba;
+ u8 *arr;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+
+ bytchk = (cmd[1] >> 1) & 0x3;
+ if (bytchk == 0) {
+ return 0; /* always claim internal verify okay */
+ } else if (bytchk == 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
+ return check_condition_result;
+ } else if (bytchk == 3) {
+ is_bytchk3 = true; /* 1 block sent, compared repeatedly */
+ }
+ switch (cmd[0]) {
+ case VERIFY_16:
+ lba = get_unaligned_be64(cmd + 2);
+ vnum = get_unaligned_be32(cmd + 10);
+ break;
+ case VERIFY: /* is VERIFY(10) */
+ lba = get_unaligned_be32(cmd + 2);
+ vnum = get_unaligned_be16(cmd + 7);
+ break;
+ default:
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ a_num = is_bytchk3 ? 1 : vnum;
+ /* Treat following check like one for read (i.e. no write) access */
+ ret = check_device_access_params(scp, lba, a_num, false);
+ if (ret)
+ return ret;
+
+ arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+ /* Not changing store, so only need read access */
+ read_lock(macc_lckp);
+
+ ret = do_dout_fetch(scp, a_num, arr);
+ if (ret == -1) {
+ ret = DID_ERROR << 16;
+ goto cleanup;
+ } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, __func__, a_num * lb_size, ret);
+ }
+ if (is_bytchk3) {
+ for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
+ memcpy(arr + off, arr, lb_size);
+ }
+ ret = 0;
+ if (!comp_write_worker(sip, lba, vnum, arr, true)) {
+ mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
+ ret = check_condition_result;
+ goto cleanup;
+ }
+cleanup:
+ read_unlock(macc_lckp);
+ kfree(arr);
+ return ret;
+}
+
+#define RZONES_DESC_HD 64
+
+/* Report zones depending on start LBA and reporting options */
+static int resp_report_zones(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned int i, max_zones, rep_max_zones, nrz = 0;
+ int ret = 0;
+ u32 alloc_len, rep_opts, rep_len;
+ bool partial;
+ u64 lba, zs_lba;
+ u8 *arr = NULL, *desc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ zs_lba = get_unaligned_be64(cmd + 2);
+ alloc_len = get_unaligned_be32(cmd + 10);
+ rep_opts = cmd[14] & 0x3f;
+ partial = cmd[14] & 0x80;
+
+ if (zs_lba >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+
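+ /* The report has a 64 byte header followed by 64 byte descriptors */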
+ max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
+ rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
+ max_zones);
+
+ arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+
+ read_lock(macc_lckp);
+
+ desc = arr + 64;
+ for (i = 0; i < max_zones; i++) {
+ lba = zs_lba + devip->zsize * i;
+ if (lba > sdebug_capacity)
+ break;
+ zsp = zbc_zone(devip, lba);
+ switch (rep_opts) {
+ case 0x00:
+ /* All zones */
+ break;
+ case 0x01:
+ /* Empty zones */
+ if (zsp->z_cond != ZC1_EMPTY)
+ continue;
+ break;
+ case 0x02:
+ /* Implicit open zones */
+ if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
+ continue;
+ break;
+ case 0x03:
+ /* Explicit open zones */
+ if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
+ continue;
+ break;
+ case 0x04:
+ /* Closed zones */
+ if (zsp->z_cond != ZC4_CLOSED)
+ continue;
+ break;
+ case 0x05:
+ /* Full zones */
+ if (zsp->z_cond != ZC5_FULL)
+ continue;
+ break;
+ case 0x06:
+ case 0x07:
+ case 0x10:
+ /*
+ * Read-only, offline, reset WP recommended are
+ * not emulated: no zones to report.
+ */
+ continue;
+ case 0x11:
+ /* non-seq-resource set */
+ if (!zsp->z_non_seq_resource)
+ continue;
+ break;
+ case 0x3f:
+ /* Not write pointer (conventional) zones */
+ if (!zbc_zone_is_conv(zsp))
+ continue;
+ break;
+ default:
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ ret = check_condition_result;
+ goto fini;
+ }
+
+ if (nrz < rep_max_zones) {
+ /* Fill zone descriptor */
+ desc[0] = zsp->z_type;
+ desc[1] = zsp->z_cond << 4;
+ if (zsp->z_non_seq_resource)
+ desc[1] |= 1 << 1;
+ put_unaligned_be64((u64)zsp->z_size, desc + 8);
+ put_unaligned_be64((u64)zsp->z_start, desc + 16);
+ put_unaligned_be64((u64)zsp->z_wp, desc + 24);
+ desc += 64;
+ }
+
+ if (partial && nrz >= rep_max_zones)
+ break;
+
+ nrz++;
+ }
+
+ /* Report header */
+ put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
+ put_unaligned_be64(sdebug_capacity - 1, arr + 8);
+
+ rep_len = (unsigned long)desc - (unsigned long)arr;
+ ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
+
+fini:
+ read_unlock(macc_lckp);
+ kfree(arr);
+ return ret;
+}
+
+/* Logic transplanted from tcmu-runner, file_zbc.c */
+static void zbc_open_all(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC4_CLOSED)
+ zbc_open_zone(devip, &devip->zstate[i], true);
+ }
+}
+
+static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ enum sdebug_z_cond zc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ /* Check if all closed zones can be opened */
+ if (devip->max_open &&
+ devip->nr_exp_open + devip->nr_closed > devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+ /* Open all closed zones */
+ zbc_open_all(devip);
+ goto fini;
+ }
+
+ /* Open the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zc = zsp->z_cond;
+ if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
+ goto fini;
+
+ if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ if (zc == ZC2_IMPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ zbc_open_zone(devip, zsp, true);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_close_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_close_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_close_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_close_all(devip);
+ goto fini;
+ }
+
+ /* Close specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_close_zone(devip, zsp);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_finish_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool empty)
+{
+ enum sdebug_z_cond zc = zsp->z_cond;
+
+ if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
+ zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ zsp->z_wp = zsp->z_start + zsp->z_size;
+ zsp->z_cond = ZC5_FULL;
+ }
+}
+
+static void zbc_finish_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_finish_zone(devip, &devip->zstate[i], false);
+}
+
+static int resp_finish_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_finish_all(devip);
+ goto fini;
+ }
+
+ /* Finish the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_finish_zone(devip, zsp, true);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_rwp_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+
+ zsp->z_non_seq_resource = false;
+ zsp->z_wp = zsp->z_start;
+ zsp->z_cond = ZC1_EMPTY;
+}
+
+static void zbc_rwp_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_rwp_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_rwp_all(devip);
+ goto fini;
+ }
+
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_rwp_zone(devip, zsp);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
u32 tag = blk_mq_unique_tag(cmnd->request);
@@ -3799,6 +4810,92 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
static bool got_shared_uuid;
static uuid_t shared_uuid;
+static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ sector_t capacity = get_sdebug_capacity();
+ sector_t zstart = 0;
+ unsigned int i;
+
+ /*
+ * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
+ * a zone size allowing for at least 4 zones on the device. Otherwise,
+ * use the specified zone size checking that at least 2 zones can be
+ * created for the device.
+ */
+ if (!sdeb_zbc_zone_size_mb) {
+ devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ while (capacity < devip->zsize << 2 && devip->zsize >= 2)
+ devip->zsize >>= 1;
+ if (devip->zsize < 2) {
+ pr_err("Device capacity too small\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
+ pr_err("Zone size is not a power of 2\n");
+ return -EINVAL;
+ }
+ devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ if (devip->zsize >= capacity) {
+ pr_err("Zone size too large for device capacity\n");
+ return -EINVAL;
+ }
+ }
+
+ devip->zsize_shift = ilog2(devip->zsize);
+ devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
+
+ if (sdeb_zbc_nr_conv >= devip->nr_zones) {
+ pr_err("Number of conventional zones too large\n");
+ return -EINVAL;
+ }
+ devip->nr_conv_zones = sdeb_zbc_nr_conv;
+
+ if (devip->zmodel == BLK_ZONED_HM) {
+ /* zbc_max_open_zones can be 0, meaning "not reported" */
+ if (sdeb_zbc_max_open >= devip->nr_zones - 1)
+ devip->max_open = (devip->nr_zones - 1) / 2;
+ else
+ devip->max_open = sdeb_zbc_max_open;
+ }
+
+ devip->zstate = kcalloc(devip->nr_zones,
+ sizeof(struct sdeb_zone_state), GFP_KERNEL);
+ if (!devip->zstate)
+ return -ENOMEM;
+
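+ /* Conventional zones, if any, come first; then sequential zones */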
+ for (i = 0; i < devip->nr_zones; i++) {
+ zsp = &devip->zstate[i];
+
+ zsp->z_start = zstart;
+
+ if (i < devip->nr_conv_zones) {
+ zsp->z_type = ZBC_ZONE_TYPE_CNV;
+ zsp->z_cond = ZBC_NOT_WRITE_POINTER;
+ zsp->z_wp = (sector_t)-1;
+ } else {
+ if (devip->zmodel == BLK_ZONED_HM)
+ zsp->z_type = ZBC_ZONE_TYPE_SWR;
+ else
+ zsp->z_type = ZBC_ZONE_TYPE_SWP;
+ zsp->z_cond = ZC1_EMPTY;
+ zsp->z_wp = zsp->z_start;
+ }
+
+ if (zsp->z_start + devip->zsize < capacity)
+ zsp->z_size = devip->zsize;
+ else
+ zsp->z_size = capacity - zsp->z_start;
+
+ zstart += zsp->z_size;
+ }
+
+ return 0;
+}
+
static struct sdebug_dev_info *sdebug_device_create(
struct sdebug_host_info *sdbg_host, gfp_t flags)
{
@@ -3818,6 +4915,16 @@ static struct sdebug_dev_info *sdebug_device_create(
}
}
devip->sdbg_host = sdbg_host;
+ if (sdeb_zbc_in_use) {
+ devip->zmodel = sdeb_zbc_model;
+ if (sdebug_device_create_zones(devip)) {
+ kfree(devip);
+ return NULL;
+ }
+ } else {
+ devip->zmodel = BLK_ZONED_NONE;
+ }
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
}
return devip;
@@ -4144,8 +5251,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
return SUCCESS;
}
-static void __init sdebug_build_parts(unsigned char *ramp,
- unsigned long store_size)
+static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
struct msdos_partition *pp;
int starts[SDEBUG_MAX_PARTS + 2];
@@ -4247,6 +5353,8 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
}
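+/*
+ * Delays below this bound that have already elapsed by the time the
+ * response function returns may be completed in the submitter's context
+ * rather than via a timer.
+ */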
+#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
+
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -4258,8 +5366,10 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
- unsigned long iflags;
+ bool new_sd_dp;
int k, num_in_q, qdepth, inject;
+ unsigned long iflags;
+ u64 ns_from_boot = 0;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
@@ -4275,7 +5385,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (delta_jiff == 0)
goto respond_in_thread;
- /* schedule the response at a later time if resources permit */
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
@@ -4334,13 +5443,17 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (sd_dp == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
+ new_sd_dp = true;
+ } else {
+ new_sd_dp = false;
}
+ if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
+ ns_from_boot = ktime_get_boottime_ns();
+
+ /* one of the resp_*() response functions is called here */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
if (cmnd->result & SDEG_RES_IMMED_MASK) {
- /*
- * This is the F_DELAY_OVERR case. No delay.
- */
cmnd->result &= ~SDEG_RES_IMMED_MASK;
delta_jiff = ndelay = 0;
}
@@ -4355,9 +5468,37 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
ktime_t kt;
if (delta_jiff > 0) {
- kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
- } else
- kt = ndelay;
+ u64 ns = jiffies_to_nsecs(delta_jiff);
+
+ if (sdebug_random && ns < U32_MAX) {
+ ns = prandom_u32_max((u32)ns);
+ } else if (sdebug_random) {
+ ns >>= 12; /* scale to 4 usec precision */
+ if (ns < U32_MAX) /* over 4 hours max */
+ ns = prandom_u32_max((u32)ns);
+ ns <<= 12;
+ }
+ kt = ns_to_ktime(ns);
+ } else { /* ndelay has a 4.2 second max */
+ kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
+ (u32)ndelay;
+ if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
+ u64 d = ktime_get_boottime_ns() - ns_from_boot;
+
+ if (kt <= d) { /* elapsed duration >= kt */
+ sqcp->a_cmnd = NULL;
+ atomic_dec(&devip->num_in_q);
+ clear_bit(k, sqp->in_use_bm);
+ if (new_sd_dp)
+ kfree(sd_dp);
+ /* call scsi_done() from this thread */
+ cmnd->scsi_done(cmnd);
+ return 0;
+ }
+ /* otherwise reduce kt by elapsed time */
+ kt -= d;
+ }
+ }
if (!sd_dp->init_hrt) {
sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
@@ -4370,6 +5511,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
sd_dp->defer_t = SDEB_DEFER_HRT;
+ /* schedule the invocation of scsi_done() for a later time */
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
if (!sd_dp->init_wq) {
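Two details in the hunk above are easy to miss: delays of 2^32 ns (about 4.3 s) or more are scaled down by 2^12 so they fit the u32 bound taken by prandom_u32_max(), giving 4 usec granularity up to roughly 4.9 hours; and short ndelay responses whose deadline has already passed are completed inline rather than arming a timer. A stand-alone sketch of the scaling half, with rand() standing in for the kernel PRNG (an assumption for illustration only):

/* Model of the "scale to 4 usec precision" trick; not driver code. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t bounded_rand(uint32_t bound)	/* models prandom_u32_max() */
{
	if (!bound)
		return 0;
	return (uint32_t)((((uint64_t)rand() << 31) ^ rand()) % bound);
}

static uint64_t randomize_ns(uint64_t ns)
{
	if (ns < UINT32_MAX)
		return bounded_rand((uint32_t)ns);
	ns >>= 12;		/* 4 usec granularity */
	if (ns < UINT32_MAX)	/* i.e. delay under ~4.9 hours */
		ns = bounded_rand((uint32_t)ns);
	return ns << 12;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)randomize_ns(10ULL * 1000000000));
	return 0;
}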
@@ -4427,31 +5569,36 @@ module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
-module_param_string(inq_vendor, sdebug_inq_vendor_id,
- sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
- sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
- sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
+module_param_string(inq_vendor, sdebug_inq_vendor_id,
+ sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
+module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
-module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
+module_param_named(medium_error_count, sdebug_medium_error_count, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(medium_error_start, sdebug_medium_error_start, int,
+ S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
+module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(per_host_store, sdebug_per_host_store, bool,
+ S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
-module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
@@ -4462,20 +5609,24 @@ module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
-module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
+module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
+module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
+module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
+module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
+module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
-MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
@@ -4488,30 +5639,32 @@ MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
-MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
SDEBUG_VERSION "\")");
+MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
+MODULE_PARM_DESC(lbprz,
+ "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
-MODULE_PARM_DESC(lbprz,
- "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
-MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors on which to return MEDIUM error");
+MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
+MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
-MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
+MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
@@ -4528,6 +5681,10 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
+MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
+MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
+MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
+MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];
@@ -4576,6 +5733,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
int f, j, l;
struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
SDEBUG_VERSION, sdebug_version_date);
@@ -4611,6 +5769,34 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
"first,last bits", f, l);
}
}
+
+ seq_printf(m, "this host_no=%d\n", host->host_no);
+ if (!xa_empty(per_store_ap)) {
+ bool niu;
+ int idx;
+ unsigned long l_idx;
+ struct sdeb_store_info *sip;
+
+ seq_puts(m, "\nhost list:\n");
+ j = 0;
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ idx = sdhp->si_idx;
+ seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
+ sdhp->shost->host_no, idx);
+ ++j;
+ }
+ seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
+ sdeb_most_recent_idx);
+ j = 0;
+ xa_for_each(per_store_ap, l_idx, sip) {
+ niu = xa_get_mark(per_store_ap, l_idx,
+ SDEB_XA_NOT_IN_USE);
+ idx = (int)l_idx;
+ seq_printf(m, " %d: idx=%d%s\n", j, idx,
+ (niu ? " not_in_use" : ""));
+ ++j;
+ }
+ }
return 0;
}
@@ -4734,7 +5920,13 @@ static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
{
int n;
+ /* Cannot change from or to TYPE_ZBC with sysfs */
+ if (sdebug_ptype == TYPE_ZBC)
+ return -EINVAL;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ if (n == TYPE_ZBC)
+ return -EINVAL;
sdebug_ptype = n;
return count;
}
@@ -4766,25 +5958,41 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n, idx;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ bool want_store = (n == 0);
+ struct sdebug_host_info *sdhp;
+
n = (n > 0);
sdebug_fake_rw = (sdebug_fake_rw > 0);
- if (sdebug_fake_rw != n) {
- if ((0 == n) && (NULL == fake_storep)) {
- unsigned long sz =
- (unsigned long)sdebug_dev_size_mb *
- 1048576;
-
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 9\n");
- return -ENOMEM;
+ if (sdebug_fake_rw == n)
+ return count; /* not transitioning so do nothing */
+
+ if (want_store) { /* 1 --> 0 transition, set up store */
+ if (sdeb_first_idx < 0) {
+ idx = sdebug_add_store();
+ if (idx < 0)
+ return idx;
+ } else {
+ idx = sdeb_first_idx;
+ xa_clear_mark(per_store_ap, idx,
+ SDEB_XA_NOT_IN_USE);
+ }
+ /* make all hosts use same store */
+ list_for_each_entry(sdhp, &sdebug_host_list,
+ host_list) {
+ if (sdhp->si_idx != idx) {
+ xa_set_mark(per_store_ap, sdhp->si_idx,
+ SDEB_XA_NOT_IN_USE);
+ sdhp->si_idx = idx;
}
}
- sdebug_fake_rw = n;
+ sdeb_most_recent_idx = idx;
+ } else { /* 0 --> 1 transition is trigger for shrink */
+ sdebug_erase_all_stores(true /* apart from first */);
}
+ sdebug_fake_rw = n;
return count;
}
return -EINVAL;
@@ -4832,6 +6040,24 @@ static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(dev_size_mb);
+static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
+}
+
+static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_per_host_store = v;
+ return count;
+}
+static DRIVER_ATTR_RW(per_host_store);
+
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
@@ -4957,6 +6183,10 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
int n;
bool changed;
+ /* Ignore capacity change for ZBC drives for now */
+ if (sdeb_zbc_in_use)
+ return -ENOTSUPP;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
changed = (sdebug_virtual_gb != n);
sdebug_virtual_gb = n;
@@ -4984,26 +6214,42 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
+ /* absolute number of hosts currently active is what is shown */
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-static int sdebug_add_adapter(void);
-static void sdebug_remove_adapter(void);
-
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
if (delta_hosts > 0) {
do {
- sdebug_add_adapter();
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
+ }
} while (--delta_hosts);
} else if (delta_hosts < 0) {
do {
- sdebug_remove_adapter();
+ sdebug_do_remove_host(false);
} while (++delta_hosts);
}
return count;
@@ -5087,14 +6333,19 @@ static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
- ssize_t count;
+ ssize_t count = 0;
if (!scsi_debug_lbp())
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
- count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
- (int)map_size, map_storep);
+ if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
+ struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
+
+ if (sip)
+ count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ (int)map_size, sip->map_storep);
+ }
buf[count++] = '\n';
buf[count] = '\0';
@@ -5102,6 +6353,24 @@ static ssize_t map_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(map);
+static ssize_t random_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
+}
+
+static ssize_t random_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_random = v;
+ return count;
+}
+static DRIVER_ATTR_RW(random);
+
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
@@ -5178,12 +6447,51 @@ static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
}
static DRIVER_ATTR_RW(cdb_len);
+static const char * const zbc_model_strs_a[] = {
+ [BLK_ZONED_NONE] = "none",
+ [BLK_ZONED_HA] = "host-aware",
+ [BLK_ZONED_HM] = "host-managed",
+};
+
+static const char * const zbc_model_strs_b[] = {
+ [BLK_ZONED_NONE] = "no",
+ [BLK_ZONED_HA] = "aware",
+ [BLK_ZONED_HM] = "managed",
+};
+
+static const char * const zbc_model_strs_c[] = {
+ [BLK_ZONED_NONE] = "0",
+ [BLK_ZONED_HA] = "1",
+ [BLK_ZONED_HM] = "2",
+};
+
+static int sdeb_zbc_model_str(const char *cp)
+{
+ int res = sysfs_match_string(zbc_model_strs_a, cp);
+
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_b, cp);
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_c, cp);
+ if (res < 0)
+ return -EINVAL;
+ }
+ }
+ return res;
+}
+
+static ssize_t zbc_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ zbc_model_strs_a[sdeb_zbc_model]);
+}
+static DRIVER_ATTR_RO(zbc);
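sdeb_zbc_model_str() above leans on sysfs_match_string(), which compares the newline-trimmed sysfs input against a sized string table and returns the matching index or -EINVAL; cascading over the three alias tables lets "host-managed", "managed" and "2" all select the same model. A minimal sketch of the idiom, with demo_modes as a hypothetical table rather than one of the driver's:

/* Sketch of the sysfs_match_string() lookup; illustrative names. */
#include <linux/string.h>

static const char * const demo_modes[] = { "none", "aware", "managed" };

static int demo_mode_str(const char *cp)
{
	return sysfs_match_string(demo_modes, cp); /* index or -EINVAL */
}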
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
directory) is that auxiliary actions can be triggered when an attribute
- is changed. For example see: sdebug_add_host_store() above.
+ is changed. For example see: add_host_store() above.
*/
static struct attribute *sdebug_drv_attrs[] = {
@@ -5203,6 +6511,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_scsi_level.attr,
&driver_attr_virtual_gb.attr,
&driver_attr_add_host.attr,
+ &driver_attr_per_host_store.attr,
&driver_attr_vpd_use_hostno.attr,
&driver_attr_sector_size.attr,
&driver_attr_statistics.attr,
@@ -5212,12 +6521,14 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_guard.attr,
&driver_attr_ato.attr,
&driver_attr_map.attr,
+ &driver_attr_random.attr,
&driver_attr_removable.attr,
&driver_attr_host_lock.attr,
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
&driver_attr_cdb_len.attr,
+ &driver_attr_zbc.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5226,11 +6537,13 @@ static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
+ bool want_store = (sdebug_fake_rw == 0);
unsigned long sz;
- int host_to_add;
- int k;
- int ret;
+ int k, ret, hosts_to_add;
+ int idx = -1;
+ ramdisk_lck_a[0] = &atomic_rw;
+ ramdisk_lck_a[1] = &atomic_rw2;
atomic_set(&retired_max_queue, 0);
if (sdebug_ndelay >= 1000 * 1000 * 1000) {
@@ -5304,6 +6617,40 @@ static int __init scsi_debug_init(void)
for (k = 0; k < submit_queues; ++k)
spin_lock_init(&sdebug_q_arr[k].qc_lock);
+ /*
+ * check for host managed zoned block device specified with
+ * ptype=0x14 or zbc=XXX.
+ */
+ if (sdebug_ptype == TYPE_ZBC) {
+ sdeb_zbc_model = BLK_ZONED_HM;
+ } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
+ k = sdeb_zbc_model_str(sdeb_zbc_model_s);
+ if (k < 0) {
+ ret = k;
+ goto free_vm;
+ }
+ sdeb_zbc_model = k;
+ switch (sdeb_zbc_model) {
+ case BLK_ZONED_NONE:
+ case BLK_ZONED_HA:
+ sdebug_ptype = TYPE_DISK;
+ break;
+ case BLK_ZONED_HM:
+ sdebug_ptype = TYPE_ZBC;
+ break;
+ default:
+ pr_err("Invalid ZBC model\n");
+ return -EINVAL;
+ }
+ }
+ if (sdeb_zbc_model != BLK_ZONED_NONE) {
+ sdeb_zbc_in_use = true;
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
+ }
+
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
if (sdebug_dev_size_mb < 1)
sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
sz = (unsigned long)sdebug_dev_size_mb * 1048576;
@@ -5326,36 +6673,6 @@ static int __init scsi_debug_init(void)
sdebug_cylinders_per = (unsigned long)sdebug_capacity /
(sdebug_sectors_per * sdebug_heads);
}
-
- if (sdebug_fake_rw == 0) {
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 1\n");
- ret = -ENOMEM;
- goto free_q_arr;
- }
- if (sdebug_num_parts > 0)
- sdebug_build_parts(fake_storep, sz);
- }
-
- if (sdebug_dix) {
- int dif_size;
-
- dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
- dif_storep = vmalloc(dif_size);
-
- pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
-
- if (dif_storep == NULL) {
- pr_err("out of mem. (DIX)\n");
- ret = -ENOMEM;
- goto free_vm;
- }
-
- memset(dif_storep, 0xff, dif_size);
- }
-
- /* Logical Block Provisioning */
if (scsi_debug_lbp()) {
sdebug_unmap_max_blocks =
clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
@@ -5371,26 +6688,16 @@ static int __init scsi_debug_init(void)
sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
ret = -EINVAL;
- goto free_vm;
+ goto free_q_arr;
}
-
- map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
- map_storep = vmalloc(array_size(sizeof(long),
- BITS_TO_LONGS(map_size)));
-
- pr_info("%lu provisioning blocks\n", map_size);
-
- if (map_storep == NULL) {
- pr_err("out of mem. (MAP)\n");
- ret = -ENOMEM;
- goto free_vm;
+ }
+ xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ if (want_store) {
+ idx = sdebug_add_store();
+ if (idx < 0) {
+ ret = idx;
+ goto free_q_arr;
}
-
- bitmap_zero(map_storep, map_size);
-
- /* Map first 1KB for partition table */
- if (sdebug_num_parts)
- map_region(0, 2);
}
pseudo_primary = root_device_register("pseudo_0");
@@ -5410,18 +6717,28 @@ static int __init scsi_debug_init(void)
goto bus_unreg;
}
- host_to_add = sdebug_add_host;
+ hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
- pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
+ for (k = 0; k < hosts_to_add; k++) {
+ if (want_store && k == 0) {
+ ret = sdebug_add_host_helper(idx);
+ if (ret < 0) {
+ pr_err("add_host_helper k=%d, error=%d\n",
+ k, -ret);
+ break;
+ }
+ } else {
+ ret = sdebug_do_add_host(want_store &&
+ sdebug_per_host_store);
+ if (ret < 0) {
+ pr_err("add_host k=%d error=%d\n", k, -ret);
+ break;
+ }
}
}
-
if (sdebug_verbose)
- pr_info("built %d host(s)\n", sdebug_add_host);
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
return 0;
@@ -5430,9 +6747,7 @@ bus_unreg:
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
+ sdebug_erase_store(idx, NULL);
free_q_arr:
kfree(sdebug_q_arr);
return ret;
@@ -5440,20 +6755,18 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k = sdebug_add_host;
+ int k = sdebug_num_hosts;
stop_all_queued();
for (; k; k--)
- sdebug_remove_adapter();
+ sdebug_do_remove_host(true);
free_all_queued();
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
- kfree(sdebug_q_arr);
+ sdebug_erase_all_stores(false);
+ xa_destroy(per_store_ap);
}
device_initcall(scsi_debug_init);
@@ -5467,29 +6780,146 @@ static void sdebug_release_adapter(struct device *dev)
kfree(sdbg_host);
}
-static int sdebug_add_adapter(void)
+/* idx must be valid, if sip is NULL then it will be obtained using idx */
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
- int k, devs_per_host;
- int error = 0;
+ if (idx < 0)
+ return;
+ if (!sip) {
+ if (xa_empty(per_store_ap))
+ return;
+ sip = xa_load(per_store_ap, idx);
+ if (!sip)
+ return;
+ }
+ vfree(sip->map_storep);
+ vfree(sip->dif_storep);
+ vfree(sip->storep);
+ xa_erase(per_store_ap, idx);
+ kfree(sip);
+}
+
+/* Assume apart_from_first==false only in shutdown case. */
+static void sdebug_erase_all_stores(bool apart_from_first)
+{
+ unsigned long idx;
+ struct sdeb_store_info *sip = NULL;
+
+ xa_for_each(per_store_ap, idx, sip) {
+ if (apart_from_first)
+ apart_from_first = false;
+ else
+ sdebug_erase_store(idx, sip);
+ }
+ if (apart_from_first)
+ sdeb_most_recent_idx = sdeb_first_idx;
+}
+
+/*
+ * Returns a new store xarray element index (idx) if >= 0, else a negated errno.
+ * Limit the number of stores to 65536.
+ */
+static int sdebug_add_store(void)
+{
+ int res;
+ u32 n_idx;
+ unsigned long iflags;
+ unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
+ struct sdeb_store_info *sip = NULL;
+ struct xa_limit xal = { .max = 1 << 16, .min = 0 };
+
+ sip = kzalloc(sizeof(*sip), GFP_KERNEL);
+ if (!sip)
+ return -ENOMEM;
+
+ xa_lock_irqsave(per_store_ap, iflags);
+ res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
+ if (unlikely(res < 0)) {
+ xa_unlock_irqrestore(per_store_ap, iflags);
+ kfree(sip);
+ pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
+ return res;
+ }
+ sdeb_most_recent_idx = n_idx;
+ if (sdeb_first_idx < 0)
+ sdeb_first_idx = n_idx;
+ xa_unlock_irqrestore(per_store_ap, iflags);
+
+ res = -ENOMEM;
+ sip->storep = vzalloc(sz);
+ if (!sip->storep) {
+ pr_err("user data oom\n");
+ goto err;
+ }
+ if (sdebug_num_parts > 0)
+ sdebug_build_parts(sip->storep, sz);
+
+ /* DIF/DIX: what T10 calls Protection Information (PI) */
+ if (sdebug_dix) {
+ int dif_size;
+
+ dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
+ sip->dif_storep = vmalloc(dif_size);
+
+ pr_info("dif_storep %u bytes @ %pK\n", dif_size,
+ sip->dif_storep);
+
+ if (!sip->dif_storep) {
+ pr_err("DIX oom\n");
+ goto err;
+ }
+ memset(sip->dif_storep, 0xff, dif_size);
+ }
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
+ map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+ sip->map_storep = vmalloc(array_size(sizeof(long),
+ BITS_TO_LONGS(map_size)));
+
+ pr_info("%lu provisioning blocks\n", map_size);
+
+ if (!sip->map_storep) {
+ pr_err("LBP map oom\n");
+ goto err;
+ }
+
+ bitmap_zero(sip->map_storep, map_size);
+
+ /* Map first 1KB for partition table */
+ if (sdebug_num_parts)
+ map_region(sip, 0, 2);
+ }
+
+ rwlock_init(&sip->macc_lck);
+ return (int)n_idx;
+err:
+ sdebug_erase_store((int)n_idx, sip);
+ pr_warn("%s: failed, errno=%d\n", __func__, -res);
+ return res;
+}
+
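sdebug_add_store() above is built on the xarray allocating API: __xa_alloc() hands back a free index under the array lock, and the SDEB_XA_NOT_IN_USE mark later lets add_host_store() park idle stores and re-find them without freeing the backing memory. A minimal sketch of that allocate/park/re-use cycle, using hypothetical demo_* names and the plain xa_alloc() wrapper rather than the locked variant:

/* Sketch of the xarray allocate/park/re-use idiom; not driver code. */
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define DEMO_NOT_IN_USE XA_MARK_1

static DEFINE_XARRAY_FLAGS(demo_arr, XA_FLAGS_ALLOC);

struct demo_store {
	void *buf;
};

static int demo_add(void)
{
	struct demo_store *sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	u32 idx;
	int res;

	if (!sp)
		return -ENOMEM;
	res = xa_alloc(&demo_arr, &idx, sp, XA_LIMIT(0, 1 << 16),
		       GFP_KERNEL);
	if (res < 0) {
		kfree(sp);
		return res;
	}
	return (int)idx;
}

static void demo_park(int idx)
{
	/* keep the entry but mark it idle for later re-use */
	xa_set_mark(&demo_arr, idx, DEMO_NOT_IN_USE);
}

static int demo_reuse_or_add(void)
{
	struct demo_store *sp;
	unsigned long idx;

	xa_for_each_marked(&demo_arr, idx, sp, DEMO_NOT_IN_USE) {
		/* re-use a parked store instead of allocating a new one */
		xa_clear_mark(&demo_arr, idx, DEMO_NOT_IN_USE);
		return (int)idx;
	}
	return demo_add();
}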
+static int sdebug_add_host_helper(int per_host_idx)
+{
+ int k, devs_per_host, idx;
+ int error = -ENOMEM;
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
- if (sdbg_host == NULL) {
- pr_err("out of memory at line %d\n", __LINE__);
+ if (!sdbg_host)
return -ENOMEM;
- }
+ idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
+ if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
+ xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ sdbg_host->si_idx = idx;
INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
- if (!sdbg_devinfo) {
- pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ if (!sdbg_devinfo)
goto clean;
- }
}
spin_lock(&sdebug_host_list_lock);
@@ -5499,44 +6929,77 @@ static int sdebug_add_adapter(void)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
-
if (error)
goto clean;
- ++sdebug_add_host;
- return error;
+ ++sdebug_num_hosts;
+ return 0;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
-
kfree(sdbg_host);
+ pr_warn("%s: failed, errno=%d\n", __func__, -error);
return error;
}
-static void sdebug_remove_adapter(void)
+static int sdebug_do_add_host(bool mk_new_store)
{
+ int ph_idx = sdeb_most_recent_idx;
+
+ if (mk_new_store) {
+ ph_idx = sdebug_add_store();
+ if (ph_idx < 0)
+ return ph_idx;
+ }
+ return sdebug_add_host_helper(ph_idx);
+}
+
+static void sdebug_do_remove_host(bool the_end)
+{
+ int idx = -1;
struct sdebug_host_info *sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host2;
spin_lock(&sdebug_host_list_lock);
if (!list_empty(&sdebug_host_list)) {
sdbg_host = list_entry(sdebug_host_list.prev,
struct sdebug_host_info, host_list);
- list_del(&sdbg_host->host_list);
+ idx = sdbg_host->si_idx;
+ }
+ if (!the_end && idx >= 0) {
+ bool unique = true;
+
+ list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
+ if (sdbg_host2 == sdbg_host)
+ continue;
+ if (idx == sdbg_host2->si_idx) {
+ unique = false;
+ break;
+ }
+ }
+ if (unique) {
+ xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ if (idx == sdeb_most_recent_idx)
+ --sdeb_most_recent_idx;
+ }
}
+ if (sdbg_host)
+ list_del(&sdbg_host->host_list);
spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
device_unregister(&sdbg_host->dev);
- --sdebug_add_host;
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -5595,6 +7058,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
const struct opcode_info_t *oip;
const struct opcode_info_t *r_oip;
struct sdebug_dev_info *devip;
+
u8 *cmd = scp->cmnd;
int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
@@ -5724,7 +7188,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
fini:
- if (F_DELAY_OVERR & flags)
+ if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
sdebug_ndelay > 10000)) {
@@ -5866,8 +7330,9 @@ static int sdebug_driver_probe(struct device *dev)
pr_err("scsi_add_host failed\n");
error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else {
scsi_scan_host(hpnt);
+ }
return error;
}
@@ -5889,6 +7354,7 @@ static int sdebug_driver_remove(struct device *dev)
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 978be1602f71..927b1e641842 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1412,6 +1412,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip START_UNIT, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
stu_scmd = NULL;
@@ -1478,6 +1479,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip BDR, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
bdr_scmd = NULL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index df4905df5cd4..0ba7a65e7c8d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -202,24 +202,17 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
blk_mq_requeue_request(cmd->request, true);
}
-/*
- * Function: scsi_queue_insert()
- *
- * Purpose: Insert a command in the midlevel queue.
- *
- * Arguments: cmd - command that we are adding to queue.
- * reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
+/**
+ * scsi_queue_insert - Reinsert a command in the queue.
+ * @cmd: command that we are adding to queue.
+ * @reason: why we are inserting command to queue.
*
- * Returns: Nothing.
+ * We do this for one of two cases. Either the host is busy and it cannot accept
+ * any more commands for the time being, or the device returned QUEUE_FULL and
+ * can accept no more commands.
*
- * Notes: We do this for one of two cases. Either the host is busy
- * and it cannot accept any more commands for the time being,
- * or the device returned QUEUE_FULL and can accept no more
- * commands.
- * Notes: This could be called either from an interrupt context or a
- * normal process context.
+ * Context: This could be called either from an interrupt context or a normal
+ * process context.
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
@@ -301,16 +294,12 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
}
EXPORT_SYMBOL(__scsi_execute);
-/*
- * Function: scsi_init_cmd_errh()
- *
- * Purpose: Initialize cmd fields related to error handling.
- *
- * Arguments: cmd - command that is ready to be queued.
+/**
+ * scsi_init_cmd_errh - Initialize cmd fields related to error handling.
+ * @cmd: command that is ready to be queued.
*
- * Notes: This function has the job of initializing a number of
- * fields related to error handling. Typically this will
- * be called once for each command, as required.
+ * This function has the job of initializing a number of fields related to error
+ * handling. Typically this will be called once for each command, as required.
*/
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
@@ -496,17 +485,11 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-/*
- * Function: scsi_run_queue()
- *
- * Purpose: Select a proper request queue to serve next
- *
- * Arguments: q - last request's queue
- *
- * Returns: Nothing
+/**
+ * scsi_run_queue - Select a proper request queue to serve next.
+ * @q: last request's queue
*
- * Notes: The previous command was completely finished, start
- * a new one if possible.
+ * The previous command was completely finished, start a new one if possible.
*/
static void scsi_run_queue(struct request_queue *q)
{
@@ -548,7 +531,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
}
}
-static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
+static void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table,
@@ -560,7 +543,7 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
}
@@ -896,34 +879,27 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
return result;
}
-/*
- * Function: scsi_io_completion()
- *
- * Purpose: Completion processing for block device I/O requests.
- *
- * Arguments: cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: We will finish off the specified number of sectors. If we
- * are done, the command block will be released and the queue
- * function will be goosed. If we are not done then we have to
- * figure out what to do next:
- *
- * a) We can call scsi_requeue_command(). The request
- * will be unprepared and put back on the queue. Then
- * a new command will be created for it. This should
- * be used if we made forward progress, or if we want
- * to switch from READ(10) to READ(6) for example.
- *
- * b) We can call __scsi_queue_insert(). The request will
- * be put back on the queue and retried using the same
- * command as before, possibly after a delay.
- *
- * c) We can call scsi_end_request() with blk_stat other than
- * BLK_STS_OK, to fail the remainder of the request.
+/**
+ * scsi_io_completion - Completion processing for SCSI commands.
+ * @cmd: command that is finished.
+ * @good_bytes: number of processed bytes.
+ *
+ * We will finish off the specified number of sectors. If we are done, the
+ * command block will be released and the queue function will be goosed. If we
+ * are not done then we have to figure out what to do next:
+ *
+ * a) We can call scsi_io_completion_reprep(). The request will be
+ * unprepared and put back on the queue. Then a new command will
+ * be created for it. This should be used if we made forward
+ * progress, or if we want to switch from READ(10) to READ(6) for
+ * example.
+ *
+ * b) We can call scsi_io_completion_action(). The request will be
+ * put back on the queue and retried using the same command as
+ * before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with blk_stat other than
+ * BLK_STS_OK, to fail the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
@@ -951,8 +927,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
blk_rq_sectors(req), good_bytes));
/*
- * Next deal with any sectors which we were able to correctly
- * handle. Failed, zero length commands always need to drop down
+ * Failed, zero length commands always need to drop down
* to retry code. Fast path should return in this block.
*/
if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
@@ -986,16 +961,14 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
sdev->host->hostt->dma_need_drain(rq);
}
-/*
- * Function: scsi_init_io()
- *
- * Purpose: SCSI I/O initialize function.
- *
- * Arguments: cmd - Command descriptor we wish to initialize
+/**
+ * scsi_init_io - SCSI I/O initialization function.
+ * @cmd: command descriptor we wish to initialize
*
- * Returns: BLK_STS_OK on success
- * BLK_STS_RESOURCE if the failure is retryable
- * BLK_STS_IOERR if the failure is fatal
+ * Returns:
+ * * BLK_STS_OK - on success
+ * * BLK_STS_RESOURCE - if the failure is retryable
+ * * BLK_STS_IOERR - if the failure is fatal
*/
blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
{
@@ -1086,7 +1059,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
return BLK_STS_OK;
out_free_sgtables:
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
return ret;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1217,6 +1190,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
@@ -1226,9 +1200,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
cmd->sc_data_direction = DMA_FROM_DEVICE;
if (blk_rq_is_scsi(req))
- return scsi_setup_scsi_cmnd(sdev, req);
+ ret = scsi_setup_scsi_cmnd(sdev, req);
else
- return scsi_setup_fs_cmnd(sdev, req);
+ ret = scsi_setup_fs_cmnd(sdev, req);
+
+ if (ret != BLK_STS_OK)
+ scsi_free_sgtables(cmd);
+
+ return ret;
}
static blk_status_t
@@ -1893,6 +1872,7 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
+ struct blk_mq_tag_set *tag_set = &shost->tag_set;
sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
scsi_mq_inline_sgl_size(shost));
@@ -1901,21 +1881,21 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
cmd_size += sizeof(struct scsi_data_buffer) +
sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
- memset(&shost->tag_set, 0, sizeof(shost->tag_set));
+ memset(tag_set, 0, sizeof(*tag_set));
if (shost->hostt->commit_rqs)
- shost->tag_set.ops = &scsi_mq_ops;
+ tag_set->ops = &scsi_mq_ops;
else
- shost->tag_set.ops = &scsi_mq_ops_no_commit;
- shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
- shost->tag_set.queue_depth = shost->can_queue;
- shost->tag_set.cmd_size = cmd_size;
- shost->tag_set.numa_node = NUMA_NO_NODE;
- shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- shost->tag_set.flags |=
+ tag_set->ops = &scsi_mq_ops_no_commit;
+ tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+ tag_set->queue_depth = shost->can_queue;
+ tag_set->cmd_size = cmd_size;
+ tag_set->numa_node = NUMA_NO_NODE;
+ tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
- shost->tag_set.driver_data = shost;
+ tag_set->driver_data = shost;
- return blk_mq_alloc_tag_set(&shost->tag_set);
+ return blk_mq_alloc_tag_set(tag_set);
}
void scsi_mq_destroy_tags(struct Scsi_Host *shost)
@@ -1944,21 +1924,13 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
-/*
- * Function: scsi_block_requests()
- *
- * Purpose: Utility function used by low-level drivers to prevent further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
+/**
+ * scsi_block_requests - Utility function used by low-level drivers to prevent
+ * further commands from being queued to the device.
+ * @shost: host in question
*
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests().
*/
void scsi_block_requests(struct Scsi_Host *shost)
{
@@ -1966,25 +1938,15 @@ void scsi_block_requests(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_block_requests);
-/*
- * Function: scsi_unblock_requests()
- *
- * Purpose: Utility function used by low-level drivers to allow further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
- *
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
- *
- * This is done as an API function so that changes to the
- * internals of the scsi mid-layer won't require wholesale
- * changes to drivers that use this feature.
+/**
+ * scsi_unblock_requests - Utility function used by low-level drivers to allow
+ * further commands to be queued to the device.
+ * @shost: host in question
+ *
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests(). This is done
+ * as an API function so that changes to the internals of the scsi mid-layer
+ * won't require wholesale changes to drivers that use this feature.
*/
void scsi_unblock_requests(struct Scsi_Host *shost)
{
@@ -2865,11 +2827,27 @@ scsi_host_block(struct Scsi_Host *shost)
struct scsi_device *sdev;
int ret = 0;
+ /*
+ * Call scsi_internal_device_block_nowait so we can avoid
+ * calling synchronize_rcu() for each LUN.
+ */
shost_for_each_device(sdev, shost) {
- ret = scsi_internal_device_block(sdev);
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_internal_device_block_nowait(sdev);
+ mutex_unlock(&sdev->state_mutex);
if (ret)
break;
}
+
+ /*
+ * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
+ * calling synchronize_rcu() once is enough.
+ */
+ WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
+
+ if (!ret)
+ synchronize_rcu();
+
return ret;
}
EXPORT_SYMBOL_GPL(scsi_host_block);
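The scsi_host_block() change above is a common RCU batching pattern: as the added comment notes, blocking each LUN with the synchronous helper paid one synchronize_rcu() grace period per device, whereas the _nowait variant defers that cost so a single synchronize_rcu() at the end covers every device on the host. A hedged sketch of the pattern, where demo_item and demo_block_one_nowait() are illustrative stand-ins rather than SCSI midlayer code:

/* Batching pattern: per-item work without waiting, then one grace period. */
#include <linux/rcupdate.h>

struct demo_item {
	bool blocked;
};

static int demo_block_one_nowait(struct demo_item *it)
{
	it->blocked = true;	/* publish state; no synchronize_rcu() here */
	return 0;
}

static int demo_block_all(struct demo_item *items, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = demo_block_one_nowait(&items[i]);
		if (ret)
			break;
	}
	if (!ret)
		synchronize_rcu();	/* one grace period for all items */
	return ret;
}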
@@ -2882,8 +2860,10 @@ scsi_host_unblock(struct Scsi_Host *shost, int new_state)
shost_for_each_device(sdev, shost) {
ret = scsi_internal_device_unblock(sdev, new_state);
- if (ret)
+ if (ret) {
+ scsi_device_put(sdev);
break;
+ }
}
return ret;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b2a803c51288..f4cc08eb47ba 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1616,6 +1616,12 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
+/*
+ * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
+ * against the kernel stop_connection recovery mechanism
+ */
+static DEFINE_MUTEX(conn_mutex);
+
static LIST_HEAD(sesslist);
static LIST_HEAD(sessdestroylist);
static DEFINE_SPINLOCK(sesslock);
@@ -2445,6 +2451,32 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+/*
+ * This can be called without the rx_queue_mutex if invoked by the kernel
+ * stop work. In that case, conn_mutex guarantees that it cannot race with
+ * iscsi_destroy.
+ */
+static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ /*
+ * It is important that this path doesn't rely on
+ * rx_queue_mutex, otherwise, a thread doing allocation on a
+ * start_session/start_connection could sleep waiting on a
+ * writeback to a failed iscsi device, that cannot be recovered
+ * because the lock is held. If we don't hold it here, the
+ * kernel stop_conn_work_fn has a chance to stop the broken
+ * session and resolve the allocation.
+ *
+ * Still, the user invoked .stop_conn() needs to be serialized
+ * with stop_conn_work_fn by a private mutex. Not pretty, but
+ * it works.
+ */
+ mutex_lock(&conn_mutex);
+ conn->transport->stop_conn(conn, flag);
+ mutex_unlock(&conn_mutex);
+}
+
static void stop_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn, *tmp;
@@ -2463,30 +2495,17 @@ static void stop_conn_work_fn(struct work_struct *work)
uint32_t sid = iscsi_conn_get_sid(conn);
struct iscsi_cls_session *session;
- mutex_lock(&rx_queue_mutex);
-
session = iscsi_session_lookup(sid);
if (session) {
if (system_state != SYSTEM_RUNNING) {
session->recovery_tmo = 0;
- conn->transport->stop_conn(conn,
- STOP_CONN_TERM);
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
} else {
- conn->transport->stop_conn(conn,
- STOP_CONN_RECOVER);
+ iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
}
}
list_del_init(&conn->conn_list_err);
-
- mutex_unlock(&rx_queue_mutex);
-
- /* we don't want to hold rx_queue_mutex for too long,
- * for instance if many conns failed at the same time,
- * since this stall other iscsi maintenance operations.
- * Give other users a chance to proceed.
- */
- cond_resched();
}
}
@@ -2846,8 +2865,11 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
spin_unlock_irqrestore(&connlock, flags);
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+
+ mutex_lock(&conn_mutex);
if (transport->destroy_conn)
transport->destroy_conn(conn);
+ mutex_unlock(&conn_mutex);
return 0;
}
@@ -3689,9 +3711,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
}
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->bind_conn(session, conn,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
+ mutex_unlock(&conn_mutex);
+
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3713,9 +3738,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_START_CONN:
conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
if (conn) {
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
conn->state = ISCSI_CONN_UP;
+ mutex_unlock(&conn_mutex);
}
else
err = -EINVAL;
@@ -3723,17 +3750,20 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_STOP_CONN:
conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
if (conn)
- transport->stop_conn(conn, ev->u.stop_conn.flag);
+ iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
else
err = -EINVAL;
break;
case ISCSI_UEVENT_SEND_PDU:
conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
- if (conn)
+ if (conn) {
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->send_pdu(conn,
(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
ev->u.send_pdu.data_size);
+ mutex_unlock(&conn_mutex);
+ }
else
err = -EINVAL;
break;
@@ -4728,7 +4758,9 @@ static __init int iscsi_transport_init(void)
goto unregister_flashnode_bus;
}
- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+ iscsi_eh_timer_workq = alloc_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 2, "iscsi_eh");
if (!iscsi_eh_timer_workq) {
err = -ENOMEM;
goto release_nls;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7b0383e42b4c..d90fefffe31b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -528,6 +528,21 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(max_write_same_blocks);
+static ssize_t
+zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ if (sdkp->device->type == TYPE_ZBC)
+ return sprintf(buf, "host-managed\n");
+ if (sdkp->zoned == 1)
+ return sprintf(buf, "host-aware\n");
+ if (sdkp->zoned == 2)
+ return sprintf(buf, "drive-managed\n");
+ return sprintf(buf, "none\n");
+}
+static DEVICE_ATTR_RO(zoned_cap);
+
static struct attribute *sd_disk_attrs[] = {
&dev_attr_cache_type.attr,
&dev_attr_FUA.attr,
@@ -541,6 +556,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_zeroing_mode.attr,
&dev_attr_max_write_same_blocks.attr,
&dev_attr_max_medium_access_timeouts.attr,
+ &dev_attr_zoned_cap.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
@@ -2962,6 +2978,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
* with partitions as regular block devices.
*/
q->limits.zoned = BLK_ZONED_NONE;
+ if (sdkp->zoned == 2 && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Drive-managed SMR disk\n");
}
}
if (blk_queue_is_zoned(q) && sdkp->first_scan)
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 713bce998b0e..3bdf0deb8f15 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -187,7 +187,7 @@ static inline void init_hpc_chain(struct ip22_hostdata *hdata)
hcp++;
dma += sizeof(struct hpc_chunk);
start += sizeof(struct hpc_chunk);
- };
+ }
hcp--;
hcp->desc.pnext = hdata->dma;
}
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index de0ab5fc8474..f4c666285bba 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -399,7 +399,7 @@ void snic_handle_link_event(struct snic *);
void snic_handle_link(struct work_struct *);
int snic_queue_exch_ver_req(struct snic *);
-int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
+void snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 449b03f3bbd3..4cd86115cfb2 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -151,7 +151,7 @@ error:
/*
* snic_io_exch_ver_cmpl_handler
*/
-int
+void
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
struct snic_req_info *rqi = NULL;
@@ -160,7 +160,6 @@ snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
u32 cmnd_id, hid, max_sgs;
ulong ctx = 0;
unsigned long flags;
- int ret = 0;
SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
@@ -224,8 +223,6 @@ exch_cmpl_end:
snic_release_untagged_req(snic, rqi);
SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);
-
- return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f9b589d60a46..4dcd735ea49e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -51,6 +51,8 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
@@ -344,10 +346,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
case ILLEGAL_REQUEST:
if (!(SCpnt->sense_buffer[0] & 0x90))
break;
- error_sector = (SCpnt->sense_buffer[3] << 24) |
- (SCpnt->sense_buffer[4] << 16) |
- (SCpnt->sense_buffer[5] << 8) |
- SCpnt->sense_buffer[6];
+ error_sector =
+ get_unaligned_be32(&SCpnt->sense_buffer[3]);
if (SCpnt->request->bio != NULL)
block_sectors =
bio_sectors(SCpnt->request->bio);
@@ -495,13 +495,9 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
SCpnt->sdb.length = this_count * s_size;
}
- SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ put_unaligned_be32(block, &SCpnt->cmnd[2]);
SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
- SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
- SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ put_unaligned_be16(this_count, &SCpnt->cmnd[7]);
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -853,8 +849,7 @@ static void get_sectorsize(struct scsi_cd *cd)
} else {
long last_written;
- cd->capacity = 1 + ((buffer[0] << 24) | (buffer[1] << 16) |
- (buffer[2] << 8) | buffer[3]);
+ cd->capacity = 1 + get_unaligned_be32(&buffer[0]);
/*
* READ_CAPACITY doesn't return the correct size on
* certain UDF media. If last_written is larger, use
@@ -865,8 +860,7 @@ static void get_sectorsize(struct scsi_cd *cd)
if (!cdrom_get_last_written(&cd->cdi, &last_written))
cd->capacity = max_t(long, cd->capacity, last_written);
- sector_size = (buffer[4] << 24) |
- (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ sector_size = get_unaligned_be32(&buffer[4]);
switch (sector_size) {
/*
* HP 4020i CD-Recorder reports 2340 byte sectors
@@ -954,13 +948,13 @@ static void get_capabilities(struct scsi_cd *cd)
}
n = data.header_length + data.block_descriptor_length;
- cd->cdi.speed = ((buffer[n + 8] << 8) + buffer[n + 9]) / 176;
+ cd->cdi.speed = get_unaligned_be16(&buffer[n + 8]) / 176;
cd->readcd_known = 1;
cd->readcd_cdda = buffer[n + 5] & 0x01;
/* print some capability bits */
sr_printk(KERN_INFO, cd,
"scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n",
- ((buffer[n + 14] << 8) + buffer[n + 15]) / 176,
+ get_unaligned_be16(&buffer[n + 14]) / 176,
cd->cdi.speed,
buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */
buffer[n + 3] & 0x20 ? "dvd-ram " : "",
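The sr.c hunks above replace hand-rolled shift-and-or code with the <asm/unaligned.h> accessors, which read and write big-endian wire-format fields at arbitrary, possibly unaligned, buffer offsets. A stand-alone model of the two helpers used here, for illustration only (the real versions live in <asm/unaligned.h>):

/* Model of get_unaligned_be32()/put_unaligned_be16(); not kernel code. */
#include <stdio.h>
#include <stdint.h>

static uint32_t get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | b[3];
}

static void put_unaligned_be16(uint16_t v, void *p)
{
	uint8_t *b = p;

	b[0] = v >> 8;
	b[1] = v & 0xff;
}

int main(void)
{
	uint8_t cdb[10] = { 0 };
	const uint8_t sense[4] = { 0x01, 0x02, 0x03, 0x04 };

	put_unaligned_be16(0x1234, &cdb[7]);
	printf("cmnd[7..8] = %02x %02x\n", cdb[7], cdb[8]);
	printf("be32 = 0x%08x\n", (unsigned int)get_unaligned_be32(sense));
	return 0;
}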
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c5f9b348b438..4bf4ab3b70f4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1457,7 +1457,6 @@ static int st_flush(struct file *filp, fl_owner_t id)
accessing this tape. */
static int st_release(struct inode *inode, struct file *filp)
{
- int result = 0;
struct scsi_tape *STp = filp->private_data;
if (STp->door_locked == ST_LOCKED_AUTO)
@@ -1470,9 +1469,9 @@ static int st_release(struct inode *inode, struct file *filp)
scsi_autopm_put_device(STp->device);
scsi_tape_put(STp);
- return result;
+ return 0;
}
-
+
/* The checks common to both reading and writing */
static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count)
{
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
index 5216d228cdd9..46bb905b4d6a 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -32,14 +32,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
- return ret;
+ goto disable_pm;
}
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "Cannot claim MPHY clock.\n");
- return PTR_ERR(clk);
+ goto clk_err;
}
clk_rate = clk_get_rate(clk);
if (clk_rate == 26000000)
@@ -54,16 +54,23 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
dev);
if (ret) {
dev_err(dev, "failed to populate child nodes %d\n", ret);
- pm_runtime_put_sync(dev);
+ goto clk_err;
}
return ret;
+
+clk_err:
+ pm_runtime_put_sync(dev);
+disable_pm:
+ pm_runtime_disable(dev);
+ return ret;
}
static int ti_j721e_ufs_remove(struct platform_device *pdev)
{
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 673c16596fb2..d56ce8d97d4e 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -30,6 +30,12 @@
#define ufs_mtk_device_reset_ctrl(high, res) \
ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
+ UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
+ END_FIX
+};
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -73,9 +79,9 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
if (status == PRE_CHANGE) {
if (host->unipro_lpm)
- hba->hba_enable_delay_us = 0;
+ hba->vps->hba_enable_delay_us = 0;
else
- hba->hba_enable_delay_us = 600;
+ hba->vps->hba_enable_delay_us = 600;
}
return 0;
@@ -263,6 +269,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clock-gating */
hba->caps |= UFSHCD_CAP_CLK_GATING;
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+ hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -555,10 +565,8 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
- if (mid == UFS_VENDOR_SAMSUNG) {
- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+ if (mid == UFS_VENDOR_SAMSUNG)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
- }
/*
* Decide waiting time before gating reference clock and
@@ -575,6 +583,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 mid = dev_info->wmanufacturerid;
+
+ ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (mid == UFS_VENDOR_SAMSUNG)
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+}
+
/**
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@@ -589,6 +608,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
+ .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
.dbg_register_dump = ufs_mtk_dbg_register_dump,
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 19aa5c44e0da..2e6ddb5cdfc2 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -572,7 +572,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
if (ufs_qcom_is_link_off(hba)) {
/*
@@ -587,7 +586,7 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_disable_lane_clks(host);
}
- return ret;
+ return 0;
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
@@ -1071,6 +1070,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ hba->caps |= UFSHCD_CAP_WB_EN;
if (host->hw_ver.major >= 0x2) {
host->caps = UFS_QCOM_CAP_QUNIPRO |
@@ -1658,11 +1658,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_testbus_read(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_print_unipro_testbus(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
}
/**
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 92a63eebdca9..2d71d232a69d 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -276,6 +276,10 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
+UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
+UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
+UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
+UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_device_type.attr,
@@ -304,6 +308,10 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
+ &dev_attr_ext_feature_sup.attr,
+ &dev_attr_wb_presv_us_en.attr,
+ &dev_attr_wb_type.attr,
+ &dev_attr_wb_shared_alloc_units.attr,
NULL,
};
@@ -373,6 +381,12 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
+UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
+UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);
+
static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_raw_device_capacity.attr,
@@ -404,6 +418,11 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
+ &dev_attr_wb_max_alloc_units.attr,
+ &dev_attr_wb_max_wb_luns.attr,
+ &dev_attr_wb_buff_cap_adj.attr,
+ &dev_attr_wb_sup_red_type.attr,
+ &dev_attr_wb_sup_wb_type.attr,
NULL,
};
@@ -603,20 +622,29 @@ static const struct attribute_group ufs_sysfs_string_descriptors_group = {
.attrs = ufs_sysfs_string_descriptors,
};
+static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
+{
+ return ((idn >= QUERY_FLAG_IDN_WB_EN) &&
+ (idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8));
+}
+
#define UFS_FLAG(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
bool flag; \
+ u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
+ if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, &flag); \
+ QUERY_FLAG_IDN##_uname, index, &flag); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
- return sprintf(buf, "%s\n", flag ? "true" : "false"); \
+ return sprintf(buf, "%s\n", flag ? "true" : "false"); \
} \
static DEVICE_ATTR_RO(_name)
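For readability, an invocation such as UFS_FLAG(wb_enable, _WB_EN) now expands to roughly the following (illustrative expansion of the macro above, not literal patch content):

static ssize_t wb_enable_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	bool flag;
	u8 index = 0;
	int ret;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	/* WB flags are per-buffer, so pick the right query index */
	if (ufshcd_is_wb_flags(QUERY_FLAG_IDN_WB_EN))
		index = ufshcd_wb_get_query_index(hba);
	pm_runtime_get_sync(hba->dev);
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_WB_EN, index, &flag);
	pm_runtime_put_sync(hba->dev);
	if (ret)
		return -EINVAL;
	return sprintf(buf, "%s\n", flag ? "true" : "false");
}
static DEVICE_ATTR_RO(wb_enable);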
@@ -628,6 +656,9 @@ UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
+UFS_FLAG(wb_enable, _WB_EN);
+UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
+UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -638,6 +669,9 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_phy_resource_removal.attr,
&dev_attr_busy_rtc.attr,
&dev_attr_disable_fw_update.attr,
+ &dev_attr_wb_enable.attr,
+ &dev_attr_wb_flush_en.attr,
+ &dev_attr_wb_flush_during_h8.attr,
NULL,
};
@@ -646,6 +680,12 @@ static const struct attribute_group ufs_sysfs_flags_group = {
.attrs = ufs_sysfs_device_flags,
};
+static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
+{
+ return ((idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS) &&
+ (idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE));
+}
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -653,9 +693,12 @@ static ssize_t _name##_show(struct device *dev, \
struct ufs_hba *hba = dev_get_drvdata(dev); \
u32 value; \
int ret; \
+ u8 index = 0; \
+ if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, 0, 0, &value); \
+ QUERY_ATTR_IDN##_uname, index, 0, &value); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
@@ -679,6 +722,11 @@ UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
+UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
+UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
+UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
+UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
+
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
@@ -697,6 +745,10 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_ffu_status.attr,
&dev_attr_psa_state.attr,
&dev_attr_psa_data_size.attr,
+ &dev_attr_wb_flush_status.attr,
+ &dev_attr_wb_avail_buf.attr,
+ &dev_attr_wb_life_time_est.attr,
+ &dev_attr_wb_cur_buf.attr,
NULL,
};
@@ -748,6 +800,8 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
+
static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_boot_lun_id.attr,
@@ -763,6 +817,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
+ &dev_attr_wb_buf_alloc_units.attr,
NULL,
};
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 990cb48e2403..c70845d41449 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -64,6 +64,9 @@
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
+/* WriteBooster buffer is available only for logical units 0 through 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
/* Well known logical unit id in LUN field of UPIU */
enum {
UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
@@ -140,6 +143,9 @@ enum flag_idn {
QUERY_FLAG_IDN_BUSY_RTC = 0x09,
QUERY_FLAG_IDN_RESERVED3 = 0x0A,
QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ QUERY_FLAG_IDN_WB_EN = 0x0E,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
};
/* Attribute idn for Query requests */
@@ -168,6 +174,10 @@ enum attr_idn {
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
};
/* Descriptor idn for Query requests */
@@ -191,9 +201,9 @@ enum desc_header_offset {
};
enum ufs_desc_def_size {
- QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x59,
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
- QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x2D,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
@@ -219,6 +229,7 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
/* Device descriptor parameters offsets in bytes*/
@@ -258,6 +269,10 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
};
/* Interconnect descriptor parameters offsets in bytes*/
@@ -302,6 +317,11 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
};
/* Health descriptor parameters offsets in bytes*/
@@ -313,6 +333,12 @@ enum health_desc_param {
HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
};
+/* WriteBooster buffer mode */
+enum {
+ WB_BUF_MODE_LU_DEDICATED = 0x0,
+ WB_BUF_MODE_SHARED = 0x1,
+};
+
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@@ -333,6 +359,11 @@ enum {
UFSHCD_AMP = 3,
};
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+ UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+};
+
#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
@@ -447,6 +478,8 @@ enum ufs_dev_pwr_mode {
UFS_POWERDOWN_PWR_MODE = 3,
};
+#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
+
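The new macro scales a percentage into the units used by the WriteBooster buffer attributes; a quick illustration (assuming, per the flush logic later in this patch, a granularity of 10% per unit):

/*
 * UFS_WB_BUF_REMAIN_PERCENT(40) == 4  ->  40% of the buffer left
 * UFS_WB_BUF_REMAIN_PERCENT(10) == 1  ->  10% of the buffer left
 * These scaled values are compared directly against
 * dAvailableWriteBoosterBufferSize (0..10) in ufshcd_wb_need_flush().
 */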
/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
@@ -532,11 +565,17 @@ struct ufs_dev_info {
bool is_lu_power_on_wp;
/* Maximum number of general LU supported by the UFS device */
u8 max_lu_supported;
+ u8 wb_dedicated_lu;
u16 wmanufacturerid;
/*UFS device Product Name */
u8 *model;
u16 wspecversion;
u32 clk_gating_wait_us;
+ u32 d_ext_ufs_feature_sup;
+ u8 b_wb_buffer_type;
+ u32 d_wb_alloc_units;
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index df7a1e6805a3..e3175a63c676 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -101,4 +101,11 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
+/*
+ * Some pre-3.1 UFS devices can support extended features by upgrading
+ * the firmware. Enable this quirk to make the UFS core driver probe
+ * for and enable supported features on such devices.
+ */
+#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 698e8d20b4ba..5db18f444ea9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -48,6 +48,8 @@
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"
+#include <asm/unaligned.h>
+#include <linux/blkdev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -92,6 +94,9 @@
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
+
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
@@ -251,6 +256,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
return tag >= 0 && tag < hba->nutrs;
@@ -272,6 +283,25 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ ret = ufshcd_wb_ctrl(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
+ else
+ dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
+ ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB flush during H8 failed: %d\n",
+ __func__, ret);
+ ufshcd_wb_toggle_flush(hba, true);
+}
+
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
@@ -535,21 +565,21 @@ void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
-/*
+/**
* ufshcd_wait_for_register - wait for register value to change
- * @hba - per-adapter interface
- * @reg - mmio register offset
- * @mask - mask to apply to read register value
- * @val - wait condition
- * @interval_us - polling interval in microsecs
- * @timeout_ms - timeout in millisecs
- * @can_sleep - perform sleep or just spin
+ * @hba: per-adapter interface
+ * @reg: mmio register offset
+ * @mask: mask to apply to the read register value
+ * @val: value to wait for
+ * @interval_us: polling interval in microseconds
+ * @timeout_ms: timeout in milliseconds
*
- * Returns -ETIMEDOUT on error, zero on success
+ * Return:
+ * -ETIMEDOUT on error, zero on success.
*/
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep)
+ unsigned long timeout_ms)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -558,10 +588,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
- if (can_sleep)
- usleep_range(interval_us, interval_us + 50);
- else
- udelay(interval_us);
+ usleep_range(interval_us, interval_us + 50);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
@@ -1150,10 +1177,17 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
- if (ret)
+ if (ret) {
ufshcd_scale_clks(hba, false);
+ goto out_unprepare;
+ }
}
+ /* Enable Write Booster if we have scaled up else disable it */
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_wb_ctrl(hba, scale_up);
+ down_write(&hba->clk_scaling_lock);
+
out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
out:
@@ -1319,23 +1353,6 @@ start_window:
return 0;
}
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 100,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
-
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
- .upthreshold = 70,
- .downdifferential = 5,
-};
-
-static void *gov_data = &ufs_ondemand_data;
-#else
-static void *gov_data; /* NULL */
-#endif
-
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
@@ -1351,12 +1368,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0);
- ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
- gov_data);
+ ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
+ &hba->vps->ondemand_data);
devfreq = devfreq_add_device(hba->dev,
- &ufs_devfreq_profile,
+ &hba->vps->devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
- gov_data);
+ &hba->vps->ondemand_data);
if (IS_ERR(devfreq)) {
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
@@ -2560,7 +2577,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000, true);
+ mask, ~mask, 1000, 1000);
return err;
}
@@ -2747,13 +2764,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+ enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
int ret;
int retries;
for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
- ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
if (ret)
dev_dbg(hba->dev,
"%s: failed with error %d, retries %d\n",
@@ -2774,16 +2791,17 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @hba: per-adapter instance
* @opcode: flag query to perform
* @idn: flag idn to access
+ * @index: flag index to access
* @flag_res: the flag value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res)
+ enum flag_idn idn, u8 index, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
- int err, index = 0, selector = 0;
+ int err, selector = 0;
int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
@@ -3223,7 +3241,7 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
struct uc_string_id {
u8 len;
u8 type;
- wchar_t uc[0];
+ wchar_t uc[];
} __packed;
/* replace non-printable or non-ASCII characters with spaces */
@@ -4137,10 +4155,10 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
int i;
int err;
- bool flag_res = 1;
+ bool flag_res = true;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
if (err) {
dev_err(hba->dev,
"%s setting fDeviceInit flag failed with error %d\n",
@@ -4151,7 +4169,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
/* poll for max. 1000 iterations for fDeviceInit flag to clear */
for (i = 0; i < 1000 && !err && flag_res; i++)
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
if (err)
dev_err(hba->dev,
@@ -4229,16 +4247,23 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
- * @can_sleep: perform sleep or just spin
*/
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
+ unsigned long flags;
int err;
+ /*
+ * Obtain the host lock to prevent the controller from being disabled
+ * while the UFS interrupt handler is active on another CPU.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
- 10, 1, can_sleep);
+ 10, 1);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -4259,7 +4284,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/* UniPro link is disabled at this point */
ufshcd_set_link_off(hba);
@@ -4279,7 +4304,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
* instruction might be read back.
* This delay can be changed based on the controller.
*/
- ufshcd_delay_us(hba->hba_enable_delay_us, 100);
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
retry = 50;
@@ -4966,7 +4991,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
goto out;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
__func__, err);
@@ -5016,7 +5041,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
__func__, err);
@@ -5161,6 +5186,190 @@ out:
__func__, err);
}
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+ int ret;
+ u8 index;
+ enum query_opcode opcode;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return 0;
+
+ if (!(enable ^ hba->wb_enabled))
+ return 0;
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_EN, index, NULL);
+ if (ret) {
+ dev_err(hba->dev, "%s write booster %s failed %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+ return ret;
+ }
+
+ hba->wb_enabled = enable;
+ dev_dbg(hba->dev, "%s write booster %s %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+
+ return ret;
+}
+
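The !(enable ^ hba->wb_enabled) test above is a compact no-op guard; an equivalent, more explicit form would be:

	/* requested state already in effect, nothing to do */
	if (enable == hba->wb_enabled)
		return 0;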
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+ int val;
+ u8 index;
+
+ if (set)
+ val = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ return ufshcd_query_flag_retry(hba, val,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
+ index, NULL);
+}
+
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+{
+ if (enable)
+ ufshcd_wb_buf_flush_enable(hba);
+ else
+ ufshcd_wb_buf_flush_disable(hba);
+}
+
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret)
+ dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
+ __func__, ret);
+ else
+ hba->wb_buf_flush_enabled = true;
+
+ dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
+ return ret;
+}
+
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret) {
+ dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
+ __func__, ret);
+ } else {
+ hba->wb_buf_flush_enabled = false;
+ dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+ u32 avail_buf)
+{
+ u32 cur_buf;
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
+ index, 0, &cur_buf);
+ if (ret) {
+ dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!cur_buf) {
+ dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
+ cur_buf);
+ return false;
+ }
+ /* Let it continue to flush when available buffer exceeds threshold */
+ if (avail_buf < hba->vps->wb_flush_threshold)
+ return true;
+
+ return false;
+}
+
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
+{
+ int ret;
+ u32 avail_buf;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return false;
+ /*
+ * The UFS device needs VCC to be ON to flush.
+ * With user-space reduction enabled, checking the available buffer
+ * alone is enough to decide on flushing; the threshold used here is
+ * "more than 90% full".
+ * With user-space preservation enabled, the current buffer must be
+ * checked too, because the WB buffer size can shrink as the disk
+ * fills up. That information comes from the current buffer size
+ * (dCurrentWriteBoosterBufferSize), and there is no point in keeping
+ * VCC on while that buffer is empty.
+ */
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
+ index, 0, &avail_buf);
+ if (ret) {
+ dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!hba->dev_info.b_presrv_uspc_en) {
+ if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
+ return true;
+ return false;
+ }
+
+ return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+}
+
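As a worked example of ufshcd_wb_need_flush() above, with values assumed purely for illustration:

/*
 * Preservation mode, threshold UFS_WB_BUF_REMAIN_PERCENT(40) == 4:
 *   avail_buf == 3, dCurWriteBoosterBufferSize != 0  -> flush, keep VCC
 *   avail_buf == 7                                   -> no flush needed
 * Reduction mode: only avail_buf <= 1 (i.e. <= 10% left) triggers it.
 */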
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(to_delayed_work(work),
+ struct ufs_hba,
+ rpm_dev_flush_recheck_work);
+ /*
+ * To prevent unnecessary VCC power drain after the device finishes
+ * a WriteBooster buffer flush or Auto BKOPs, force a runtime resume
+ * after a certain delay so that the threshold is rechecked at the
+ * next runtime suspend.
+ */
+ pm_runtime_get_sync(hba->dev);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
@@ -5723,7 +5932,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000, true);
+ mask, 0, 1000, 1000);
out:
return err;
}
@@ -6299,8 +6508,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufshcd_hba_stop(hba);
+
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_hba_stop(hba, false);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
@@ -6603,6 +6813,93 @@ out:
return ret;
}
+static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u8 lun;
+ u32 d_lu_wb_buf_alloc;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
+ goto wb_disabled;
+
+ hba->dev_info.d_ext_ufs_feature_sup =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+ if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+ goto wb_disabled;
+
+ /*
+ * WB may be supported but left unconfigured during provisioning.
+ * Per the spec, at most one LUN may have a WB buffer configured in
+ * dedicated buffer mode; in shared buffer mode the allocation is
+ * read from the device descriptor instead.
+ */
+ hba->dev_info.b_wb_buffer_type =
+ desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+
+ hba->dev_info.b_presrv_uspc_en =
+ desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
+
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
+ hba->dev_info.d_wb_alloc_units =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
+ if (!hba->dev_info.d_wb_alloc_units)
+ goto wb_disabled;
+ } else {
+ for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
+ d_lu_wb_buf_alloc = 0;
+ ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
+ (u8 *)&d_lu_wb_buf_alloc,
+ sizeof(d_lu_wb_buf_alloc));
+ if (d_lu_wb_buf_alloc) {
+ hba->dev_info.wb_dedicated_lu = lun;
+ break;
+ }
+ }
+
+ if (!d_lu_wb_buf_alloc)
+ goto wb_disabled;
+ }
+ return;
+
+wb_disabled:
+ hba->caps &= ~UFSHCD_CAP_WB_EN;
+}
+
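For instance, a device that still reports the old default device descriptor length would bail out at the size check (assuming the device-reported length is what ends up in hba->desc_size.dev_desc):

/*
 * 0x40 (the former QUERY_DESC_DEVICE_DEF_SIZE) is smaller than
 * DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4 == 0x53, so the check
 * fails and UFSHCD_CAP_WB_EN is cleared via the wb_disabled path.
 */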
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
+{
+ struct ufs_dev_fix *f;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (!fixups)
+ return;
+
+ for (f = fixups; f->quirk; f++) {
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
+ ((dev_info->model &&
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+ !strcmp(f->model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
+
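A vendor table consumed by ufshcd_fixup_dev_quirks() looks like the MediaTek one earlier in this patch; a minimal hypothetical example:

/* Hypothetical table: match by vendor id plus model-name prefix */
static struct ufs_dev_fix example_fixups[] = {
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	END_FIX
};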
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
+{
+ /* fix by general quirk table */
+ ufshcd_fixup_dev_quirks(hba, ufs_fixups);
+
+ /* allow vendors to fix quirks */
+ ufshcd_vops_fixup_dev_quirks(hba);
+}
+
static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
@@ -6639,6 +6936,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -6647,6 +6945,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ ufs_fixup_device_setup(hba);
+
+ /*
+ * Probe WB only for UFS-3.1 devices or UFS devices with quirk
+ * UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES enabled
+ */
+ if (dev_info->wspecversion >= 0x310 ||
+ dev_info->wspecversion == 0x220 ||
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
+ ufshcd_wb_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -6666,21 +6975,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
dev_info->model = NULL;
}
-static void ufs_fixup_device_setup(struct ufs_hba *hba)
-{
- struct ufs_dev_fix *f;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- for (f = ufs_fixups; f->quirk; f++) {
- if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
- f->wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_info->model &&
- STR_PRFX_EQUAL(f->model, dev_info->model)) ||
- !strcmp(f->model, UFS_ANY_MODEL)))
- hba->dev_quirks |= f->quirk;
- }
-}
-
/**
* ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
* @hba: per-adapter instance
@@ -6996,9 +7290,6 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
bool flag;
int ret;
- /* Clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-
/* Init check for device descriptor sizes */
ufshcd_init_desc_sizes(hba);
@@ -7017,10 +7308,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
ufshcd_get_ref_clk_gating_wait(hba);
- ufs_fixup_device_setup(hba);
-
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
hba->dev_info.f_power_on_wp_en = flag;
/* Probe maximum power mode co-supported by both UFS host and device */
@@ -7149,6 +7438,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ ufshcd_wb_config(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -7195,6 +7485,16 @@ static const struct attribute_group *ufshcd_driver_groups[] = {
NULL,
};
+static struct ufs_hba_variant_params ufs_hba_vps = {
+ .hba_enable_delay_us = 1000,
+ .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
+ .devfreq_profile.polling_ms = 100,
+ .devfreq_profile.target = ufshcd_devfreq_target,
+ .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
+ .ondemand_data.upthreshold = 70,
+ .ondemand_data.downdifferential = 5,
+};
+
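Vendor drivers can still tune these shared defaults from their init paths, as ufs-mediatek does above; a sketch (hypothetical function name):

static int example_vendor_init(struct ufs_hba *hba)
{
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
	return 0;
}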
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
@@ -7774,7 +8074,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
@@ -7809,6 +8109,9 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
*
* Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
* in low power state which would save some power.
+ *
+ * If Write Booster is enabled and the device needs to flush the WB
+ * buffer OR if bkops status is urgent for WB, keep Vcc on.
*/
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
!hba->dev_info.is_lu_power_on_wp) {
@@ -7938,16 +8241,31 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
}
- }
+ /*
+ * If device needs to do BKOP or WB buffer flush during
+ * Hibern8, keep device power mode as "active power mode"
+ * and VCC supply.
+ */
+ hba->dev_info.b_rpm_dev_flush_capable =
+ hba->auto_bkops_enabled ||
+ (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
+ ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
+ ufshcd_is_auto_hibern8_enabled(hba))) &&
+ ufshcd_wb_need_flush(hba));
+ }
+
+ if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
+ if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op)) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op))) {
- /* ensure that bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
- if (ret)
- goto enable_gating;
+ if (!hba->dev_info.b_rpm_dev_flush_capable) {
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
}
flush_work(&hba->eeh_work);
@@ -8000,9 +8318,16 @@ enable_gating:
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
+ hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_release(hba);
out:
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
+ msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
+ }
+
hba->pm_op_in_progress = 0;
+
if (ret)
ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
return ret;
@@ -8055,9 +8380,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
else
goto vendor_suspend;
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is
+ * required since the link was put to off during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
@@ -8087,6 +8416,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ hba->dev_info.b_rpm_dev_flush_capable = false;
+ cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
+ }
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8313,7 +8647,7 @@ void ufshcd_remove(struct ufs_hba *hba)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
@@ -8422,7 +8756,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- hba->hba_enable_delay_us = 1000;
+ hba->vps = &ufs_hba_vps;
err = ufshcd_hba_init(hba);
if (err)
@@ -8560,6 +8894,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+ ufshcd_rpm_dev_flush_recheck_work);
+
/* Set the default auto-hiberate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 6ffc08ad85f6..bf97d616e597 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -69,6 +69,7 @@
#include <scsi/scsi_eh.h>
#include "ufs.h"
+#include "ufs_quirks.h"
#include "ufshci.h"
#define UFSHCD "ufshcd"
@@ -336,6 +337,7 @@ struct ufs_hba_variant_ops {
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
int (*apply_dev_quirks)(struct ufs_hba *hba);
+ void (*fixup_dev_quirks)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -555,6 +557,20 @@ enum ufshcd_caps {
* for userspace to control the power management.
*/
UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+
+ /*
+ * This capability allows the host controller driver to turn-on
+ * WriteBooster, if the underlying device supports it and is
+ * provisioned to be used. This would increase the write performance.
+ */
+ UFSHCD_CAP_WB_EN = 1 << 7,
+};
+
+struct ufs_hba_variant_params {
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq_simple_ondemand_data ondemand_data;
+ u16 hba_enable_delay_us;
+ u32 wb_flush_threshold;
};
/**
@@ -654,6 +670,7 @@ struct ufs_hba {
int nutmrs;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_variant_params *vps;
void *priv;
unsigned int irq;
bool is_irq_enabled;
@@ -675,7 +692,6 @@ struct ufs_hba {
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
- u16 hba_enable_delay_us;
bool is_powered;
/* Work Queues */
@@ -727,6 +743,9 @@ struct ufs_hba {
struct device bsg_dev;
struct request_queue *bsg_queue;
+ bool wb_buf_flush_enabled;
+ bool wb_enabled;
+ struct delayed_work rpm_dev_flush_recheck_work;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -775,6 +794,11 @@ static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}
+static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_EN;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -808,7 +832,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep);
+ unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
u32 reg);
@@ -845,6 +869,13 @@ static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}
+static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
+{
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
+ return hba->dev_info.wb_dedicated_lu;
+ return 0;
+}
+
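The helper routes WriteBooster queries to the right target: index 0 in shared buffer mode, the owning LUN in LU-dedicated mode. A sketch of a caller (avail_buf declared for illustration):

	u32 avail_buf;
	u8 index = ufshcd_wb_get_query_index(hba);

	ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			  QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE, index, 0,
			  &avail_buf);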
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
@@ -932,11 +963,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res);
+ enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -1071,6 +1102,12 @@ static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->fixup_dev_quirks)
+ hba->vops->fixup_dev_quirks(hba);
+}
+
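The two quirk hooks run at different stages; the call order, as inferred from the hunks above, is roughly:

/*
 * ufs_get_device_desc()
 *   ufs_fixup_device_setup()
 *     ufshcd_fixup_dev_quirks(hba, ufs_fixups)   - core quirk table
 *     ufshcd_vops_fixup_dev_quirks(hba)          - vendor additions
 * ...
 * ufshcd_vops_apply_dev_quirks(hba)              - acts on dev_quirks
 */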
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c3f010df641e..8dbb4db6831a 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -908,7 +908,7 @@ static int pvscsi_host_reset(struct scsi_cmnd *cmd)
use_msg = adapter->use_msg;
if (use_msg) {
- adapter->use_msg = 0;
+ adapter->use_msg = false;
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/*
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 89183b3b178f..45ba07c6ec27 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1228,18 +1228,20 @@ void iscsit_print_session_params(struct iscsi_session *sess)
iscsi_dump_sess_ops(sess->sess_ops);
}
-static int iscsit_do_rx_data(
+int rx_data(
struct iscsi_conn *conn,
- struct iscsi_data_count *count)
+ struct kvec *iov,
+ int iov_count,
+ int data)
{
- int data = count->data_length, rx_loop = 0, total_rx = 0;
+ int rx_loop = 0, total_rx = 0;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data);
+ iov_iter_kvec(&msg.msg_iter, READ, iov, iov_count, data);
while (msg_data_left(&msg)) {
rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
@@ -1256,26 +1258,6 @@ static int iscsit_do_rx_data(
return total_rx;
}
-int rx_data(
- struct iscsi_conn *conn,
- struct kvec *iov,
- int iov_count,
- int data)
-{
- struct iscsi_data_count c;
-
- if (!conn || !conn->sock || !conn->conn_ops)
- return -1;
-
- memset(&c, 0, sizeof(struct iscsi_data_count));
- c.iov = iov;
- c.iov_count = iov_count;
- c.data_length = data;
- c.type = ISCSI_RX_DATA;
-
- return iscsit_do_rx_data(conn, &c);
-}
-
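With the iscsi_data_count wrapper gone, callers pass their kvec straight through; a sketch of a caller after the refactor (buf and len are placeholders):

	struct kvec iov = { .iov_base = buf, .iov_len = len };
	int rx = rx_data(conn, &iov, 1, len);

	if (rx != len)
		return -1;	/* short read or connection error */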
int tx_data(
struct iscsi_conn *conn,
struct kvec *iov,
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3305b47fdf53..16d5a4e117a2 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -545,32 +545,15 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+static int tcm_loop_queue_data_or_status(const char *func,
+ struct se_cmd *se_cmd, u8 scsi_status)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
- __func__, sc, sc->cmnd[0]);
-
- sc->result = SAM_STAT_GOOD;
- set_host_byte(sc, DID_OK);
- if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
- (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
- scsi_set_resid(sc, se_cmd->residual_count);
- sc->scsi_done(sc);
- return 0;
-}
-
-static int tcm_loop_queue_status(struct se_cmd *se_cmd)
-{
- struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
- struct tcm_loop_cmd, tl_se_cmd);
- struct scsi_cmnd *sc = tl_cmd->sc;
-
- pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
- __func__, sc, sc->cmnd[0]);
+ func, sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
@@ -581,7 +564,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
sc->result = SAM_STAT_CHECK_CONDITION;
set_driver_byte(sc, DRIVER_SENSE);
} else
- sc->result = se_cmd->scsi_status;
+ sc->result = scsi_status;
set_host_byte(sc, DID_OK);
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
@@ -591,6 +574,17 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
return 0;
}
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__,
+ se_cmd, se_cmd->scsi_status);
+}
+
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 385e4cf9cfa6..6b72afee2f8b 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -677,7 +677,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
/*
@@ -1090,7 +1090,7 @@ int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *tg_pt_gp;
int primary, valid_states, rc = 0;
- if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return -ENODEV;
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
@@ -1920,7 +1920,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2177,7 +2177,7 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2263,7 +2263,7 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev)
{
- if (!(dev->transport->transport_flags &
+ if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ff82b21fdcce..f04352285155 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1099,21 +1099,73 @@ static ssize_t block_size_store(struct config_item *item,
static ssize_t alua_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
- u8 flags = da->da_dev->transport->transport_flags;
+ u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}
+static ssize_t alua_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
+ pr_err("dev[%p]: Unable to change SE Device alua_support:"
+ " alua_support has fixed value\n", dev);
+ return -EINVAL;
+ }
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ return count;
+}
+
static ssize_t pgr_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
- u8 flags = da->da_dev->transport->transport_flags;
+ u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}
+static ssize_t pgr_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
+ pr_err("dev[%p]: Unable to change SE Device pgr_support:"
+ " pgr_support has fixed value\n", dev);
+ return -EINVAL;
+ }
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ return count;
+}
+
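From userspace these become writable configfs attributes; roughly (path illustrative):

/*
 *   echo 1 > .../target/core/<hba>/<dev>/attrib/pgr_support
 * clears TRANSPORT_FLAG_PASSTHROUGH_PGR and re-enables in-core PR
 * emulation, provided the backend declared the flag changeable.
 */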
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
@@ -1146,8 +1198,8 @@ CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
-CONFIGFS_ATTR_RO(, alua_support);
-CONFIGFS_ATTR_RO(, pgr_support);
+CONFIGFS_ATTR(, alua_support);
+CONFIGFS_ATTR(, pgr_support);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1203,12 +1255,24 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
&attr_hw_block_size,
&attr_hw_max_sectors,
&attr_hw_queue_depth,
+ &attr_emulate_pr,
&attr_alua_support,
&attr_pgr_support,
NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
+/*
+ * pr related dev_attrib attributes for devices passing through CDBs,
+ * but allowing in core pr emulation.
+ */
+struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
+ &attr_enforce_pr_isids,
+ &attr_force_pr_aptpl,
+ NULL,
+};
+EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
+
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
@@ -1642,7 +1706,7 @@ static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "Passthrough\n");
spin_lock(&dev->dev_reservation_lock);
@@ -1784,7 +1848,7 @@ static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "SPC_PASSTHROUGH\n");
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return sprintf(page, "SPC2_RESERVATIONS\n");
@@ -1798,7 +1862,7 @@ static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
@@ -1811,7 +1875,7 @@ static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1858,7 +1922,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
u8 type = 0;
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return count;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 4cee1138284b..46b0e1ceb77f 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -100,9 +100,10 @@ out_unlock:
*/
if (unpacked_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08llx\n",
+ " Access for 0x%08llx from %s\n",
se_cmd->se_tfo->fabric_name,
- unpacked_lun);
+ unpacked_lun,
+ nacl->initiatorname);
return TCM_NON_EXISTENT_LUN;
}
@@ -174,9 +175,10 @@ out_unlock:
if (!se_lun) {
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08llx\n",
+ " Access for 0x%08llx for %s\n",
se_cmd->se_tfo->fabric_name,
- unpacked_lun);
+ unpacked_lun,
+ nacl->initiatorname);
return -ENODEV;
}
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
@@ -732,6 +734,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->se_hba = hba;
dev->transport = hba->backend->ops;
+ dev->transport_flags = dev->transport->transport_flags_default;
dev->prot_length = sizeof(struct t10_pi_tuple);
dev->hba_index = hba->hba_index;
@@ -1100,7 +1103,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
* emulate the response, since tcmu does not have the information
* required to process these commands.
*/
- if (!(dev->transport->transport_flags &
+ if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
if (cdb[0] == PERSISTENT_RESERVE_IN) {
cmd->execute_cmd = target_scsi3_emulate_pr_in;
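The net effect across target_core: passthrough flags now live per device, seeded from the backend default at allocation time and adjustable afterwards; in sketch form:

/*
 * target_alloc_device():
 *   dev->transport_flags = dev->transport->transport_flags_default;
 * later, via configfs (pgr_support_store() above):
 *   dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
 */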
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5e931690e697..91e41cc55704 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4086,7 +4086,7 @@ target_check_reservation(struct se_cmd *cmd)
return 0;
if (!dev->dev_attrib.emulate_pr)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return 0;
spin_lock(&dev->dev_reservation_lock);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index c9d92b3e777d..4e37fa9b409d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1070,9 +1070,9 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH |
- TRANSPORT_FLAG_PASSTHROUGH_ALUA |
- TRANSPORT_FLAG_PASSTHROUGH_PGR,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA |
+ TRANSPORT_FLAG_PASSTHROUGH_PGR,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index d24e0a3ba3ff..62aa5fa63ac0 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -582,8 +582,7 @@ int core_tpg_add_lun(
if (ret)
goto out_kill_ref;
- if (!(dev->transport->transport_flags &
- TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
+ if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 264a822c0bfa..da37af9c3a5e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1397,7 +1397,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0;
if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -2012,7 +2012,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
@@ -2126,7 +2126,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index f769bb1e3735..28fb9441de7a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -882,41 +882,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
- struct timer_list *timer)
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
{
- struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- int cmd_id;
-
- if (tcmu_cmd->cmd_id)
- goto setup_timer;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
- return cmd_id;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
- udev->name, tmo / MSEC_PER_SEC);
-
-setup_timer:
if (!tmo)
- return 0;
+ return;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
if (!timer_pending(timer))
mod_timer(timer, tcmu_cmd->deadline);
- return 0;
+ pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+ tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
- int ret;
/*
* For backwards compat if qfull_time_out is not set use
@@ -931,13 +914,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
else
tmo = TCMU_TIME_OUT;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
- if (ret)
- return ret;
+ tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
- pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
+ tcmu_cmd, udev->name);
return 0;
}
@@ -959,7 +940,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, ret;
+ int iov_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
@@ -1060,14 +1041,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
}
entry->req.iov_bidi_cnt = iov_cnt;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
- &udev->cmd_timer);
- if (ret) {
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
+
+ tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
+
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/*
@@ -1279,50 +1267,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
return handled;
}
-static int tcmu_check_expired_cmd(int id, void *p, void *data)
+static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
- struct tcmu_cmd *cmd = p;
- struct tcmu_dev *udev = cmd->tcmu_dev;
- u8 scsi_status;
struct se_cmd *se_cmd;
- bool is_running;
-
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
- return 0;
if (!time_after(jiffies, cmd->deadline))
- return 0;
+ return;
- is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
+ cmd->se_cmd = NULL;
- if (is_running) {
- /*
- * If cmd_time_out is disabled but qfull is set deadline
- * will only reflect the qfull timeout. Ignore it.
- */
- if (!udev->cmd_time_out)
- return 0;
+ pr_debug("Timing out inflight cmd %u on dev %s.\n",
+ cmd->cmd_id, cmd->tcmu_dev->name);
- set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
- /*
- * target_complete_cmd will translate this to LUN COMM FAILURE
- */
- scsi_status = SAM_STAT_CHECK_CONDITION;
- list_del_init(&cmd->queue_entry);
- cmd->se_cmd = NULL;
- } else {
- list_del_init(&cmd->queue_entry);
- idr_remove(&udev->commands, id);
- tcmu_free_cmd(cmd);
- scsi_status = SAM_STAT_TASK_SET_FULL;
- }
+ target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
+}
- pr_debug("Timing out cmd %u on dev %s that is %s.\n",
- id, udev->name, is_running ? "inflight" : "queued");
+static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+{
+ struct se_cmd *se_cmd;
- target_complete_cmd(se_cmd, scsi_status);
- return 0;
+ if (!time_after(jiffies, cmd->deadline))
+ return;
+
+ pr_debug("Timing out queued cmd %p on dev %s.\n",
+ cmd, cmd->tcmu_dev->name);
+
+ list_del_init(&cmd->queue_entry);
+ se_cmd = cmd->se_cmd;
+ tcmu_free_cmd(cmd);
+
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
static void tcmu_device_timedout(struct tcmu_dev *udev)
@@ -1407,16 +1384,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
+static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
- bool drained = true;
sense_reason_t scsi_ret;
int ret;
if (list_empty(&udev->qfull_queue))
- return true;
+ return;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
@@ -1425,11 +1401,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
list_del_init(&tcmu_cmd->queue_entry);
- pr_debug("removing cmd %u on dev %s from queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("removing cmd %p on dev %s from queue\n",
+ tcmu_cmd, udev->name);
if (fail) {
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
/*
* We were not able to even start the command, so
* fail with busy to allow a retry in case runner
@@ -1444,10 +1419,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0) {
- pr_debug("cmd %u on dev %s failed with %u\n",
- tcmu_cmd->cmd_id, udev->name, scsi_ret);
-
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ pr_debug("cmd %p on dev %s failed with %u\n",
+ tcmu_cmd, udev->name, scsi_ret);
/*
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
@@ -1462,13 +1435,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
* the queue
*/
list_splice_tail(&cmds, &udev->qfull_queue);
- drained = false;
break;
}
}
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
- return drained;
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
@@ -1652,6 +1623,8 @@ static void tcmu_dev_kref_release(struct kref *kref)
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
+ if (!list_empty(&udev->qfull_queue))
+ all_expired = false;
idr_destroy(&udev->commands);
WARN_ON(!all_expired);
@@ -2037,9 +2010,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
- continue;
-
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
@@ -2077,6 +2047,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
del_timer(&udev->cmd_timer);
+ run_qfull_queue(udev, false);
+
mutex_unlock(&udev->cmdr_lock);
}
@@ -2617,7 +2589,9 @@ static struct configfs_attribute *tcmu_action_attrs[] = {
static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
.attach_hba = tcmu_attach_hba,
.detach_hba = tcmu_detach_hba,
.alloc_device = tcmu_alloc_device,
@@ -2698,6 +2672,7 @@ static void find_free_blocks(void)
static void check_timedout_devices(void)
{
struct tcmu_dev *udev, *tmp_dev;
+ struct tcmu_cmd *cmd, *tmp_cmd;
LIST_HEAD(devs);
spin_lock_bh(&timed_out_udevs_lock);
@@ -2708,9 +2683,24 @@ static void check_timedout_devices(void)
spin_unlock_bh(&timed_out_udevs_lock);
mutex_lock(&udev->cmdr_lock);
- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
- tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ /*
+ * If cmd_time_out is disabled but qfull is set deadline
+ * will only reflect the qfull timeout. Ignore it.
+ */
+ if (udev->cmd_time_out) {
+ list_for_each_entry_safe(cmd, tmp_cmd,
+ &udev->inflight_queue,
+ queue_entry) {
+ tcmu_check_expired_ring_cmd(cmd);
+ }
+ tcmu_set_next_deadline(&udev->inflight_queue,
+ &udev->cmd_timer);
+ }
+ list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
+ queue_entry) {
+ tcmu_check_expired_queue_cmd(cmd);
+ }
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
mutex_unlock(&udev->cmdr_lock);
@@ -2753,12 +2743,12 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
- for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
- }
- for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
+ len += sizeof(struct configfs_attribute *);
+ for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
- }
len += sizeof(struct configfs_attribute *);
tcmu_attrs = kzalloc(len, GFP_KERNEL);
@@ -2767,13 +2757,12 @@ static int __init tcmu_module_init(void)
goto out_unreg_genl;
}
- for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
tcmu_attrs[i] = passthrough_attrib_attrs[i];
- }
- for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
- tcmu_attrs[i] = tcmu_attrib_attrs[k];
- i++;
- }
+ for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
+ for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = tcmu_attrib_attrs[k];
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
ret = transport_backend_register(&tcmu_ops);
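
The attribute-table rework at the end of tcmu_module_init() is easy to misread: every source array is NULL-terminated, the byte count must reserve one extra pointer slot for the terminator (the lone len += sizeof(...) after the counting loops), and kzalloc() supplies the terminating NULL for free. A self-contained userspace model of the same bookkeeping, with hypothetical attribute names standing in for the configfs_attribute pointers:

	#include <stdio.h>
	#include <stdlib.h>

	static const char *passthrough_attrs[] = { "emulate_pr", NULL };
	static const char *pr_attrs[]          = { "alua_support", "pgr_support", NULL };
	static const char *backend_attrs[]     = { "cmd_time_out", "qfull_time_out", NULL };

	int main(void)
	{
		size_t len = 0;
		int i, k;

		for (i = 0; passthrough_attrs[i]; i++)
			len += sizeof(const char *);
		for (i = 0; pr_attrs[i]; i++)
			len += sizeof(const char *);
		for (i = 0; backend_attrs[i]; i++)
			len += sizeof(const char *);
		len += sizeof(const char *);		/* slot for the terminating NULL */

		const char **merged = calloc(1, len);	/* zeroed, so already terminated */
		if (!merged)
			return 1;

		for (i = 0; passthrough_attrs[i]; i++)
			merged[i] = passthrough_attrs[i];
		for (k = 0; pr_attrs[k]; k++)
			merged[i++] = pr_attrs[k];
		for (k = 0; backend_attrs[k]; k++)
			merged[i++] = backend_attrs[k];

		for (i = 0; merged[i]; i++)
			printf("%s\n", merged[i]);
		free(merged);
		return 0;
	}

Note how the copy phase reuses i as the running write index across all three loops, exactly as the patched code does with tcmu_attrs[i++].
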
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c39952243fd3..8b104f76f324 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2280,6 +2280,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.fabric_name = "vhost",
+ .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = vhost_scsi_get_tpgt,
.tpg_check_demo_mode = vhost_scsi_check_true,
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 48325d7790f8..8cb76405cbce 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -828,6 +828,7 @@ struct qed_common_cb_ops {
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
void (*get_protocol_tlv_data)(void *dev, void *data);
+ void (*bw_update)(void *dev);
};
struct qed_selftest_ops {
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index a5d8ae49198c..4726c1bbec65 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -324,7 +324,7 @@ struct ssp_response_iu {
__be32 response_data_len;
u8 resp_data[0];
- u8 sense_data[0];
+ u8 sense_data[];
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -346,7 +346,7 @@ struct ssp_command_iu {
u8 add_cdb_len:6;
u8 cdb[16];
- u8 add_cdb[0];
+ u8 add_cdb[];
} __attribute__ ((packed));
struct xfer_rdy_iu {
@@ -555,7 +555,7 @@ struct ssp_response_iu {
__be32 response_data_len;
u8 resp_data[0];
- u8 sense_data[0];
+ u8 sense_data[];
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -577,7 +577,7 @@ struct ssp_command_iu {
u8 _r_c:2;
u8 cdb[16];
- u8 add_cdb[0];
+ u8 add_cdb[];
} __attribute__ ((packed));
struct xfer_rdy_iu {
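
The [0]-to-[] conversions in this header swap GCC's legacy zero-length-array idiom for the C99 flexible array member, which sizeof, compilers, and fortify-style bounds checking all understand natively (the overlapping resp_data[0] has to stay as-is, since only the last member of a struct may be flexible). A minimal, self-contained illustration of how such a trailing member is sized and used, with a made-up struct rather than the sas.h layout:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct response_iu {
		uint32_t response_data_len;
		uint8_t  sense_data[];		/* C99 flexible array member */
	};

	int main(void)
	{
		size_t n = 32;
		/* One allocation covers the header plus n trailing bytes. */
		struct response_iu *iu = malloc(sizeof(*iu) + n);

		if (!iu)
			return 1;
		iu->response_data_len = 0;
		memset(iu->sense_data, 0xff, n);
		free(iu);
		return 0;
	}

In-kernel code would typically compute that allocation with the struct_size(iu, sense_data, n) helper from linux/overflow.h to guard against overflow.
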
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index f93c0b800790..e76bac4d14c5 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -68,7 +68,6 @@ struct scsi_pointer {
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
- struct list_head list; /* scsi_cmnd participates in queue lists */
struct list_head eh_entry; /* entry for the host eh_cmd_q */
struct delayed_work abort_work;
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 591cd9e4692c..4fda324f4b35 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -301,16 +301,6 @@ struct iscsi_queue_req {
struct list_head qr_list;
};
-struct iscsi_data_count {
- int data_length;
- int sync_and_steering;
- enum data_count_type type;
- u32 iov_count;
- u32 ss_iov_count;
- u32 ss_marker_count;
- struct kvec *iov;
-};
-
struct iscsi_param_list {
bool iser;
struct list_head param_list;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 1b752d8ea529..f51452e3b984 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -23,7 +23,8 @@ struct target_backend_ops {
char inquiry_rev[4];
struct module *owner;
- u8 transport_flags;
+ u8 transport_flags_default;
+ u8 transport_flags_changeable;
int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *);
@@ -94,6 +95,7 @@ int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
extern struct configfs_attribute *sbc_attrib_attrs[];
extern struct configfs_attribute *passthrough_attrib_attrs[];
+extern struct configfs_attribute *passthrough_pr_attrib_attrs[];
/* core helpers also used by command snooping in pscsi */
void *transport_kmap_data_sg(struct se_cmd *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6d4a694f6ea7..18c3f277b770 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -772,6 +772,7 @@ struct se_device {
#define DF_USING_UDEV_PATH 0x00000008
#define DF_USING_ALIAS 0x00000010
#define DF_READ_ONLY 0x00000020
+ u8 transport_flags;
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
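
With this last hunk the refactoring is complete: target_backend_ops now publishes an immutable default mask plus a mask of bits an administrator may flip, and target_alloc_device() seeds the per-device se_device transport_flags copy from the default, which every runtime check in the earlier hunks now reads. A compact userspace model of that policy split, with hypothetical names mirroring the kernel structs:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FLAG_PASSTHROUGH	0x1
	#define FLAG_PASSTHROUGH_ALUA	0x2
	#define FLAG_PASSTHROUGH_PGR	0x4

	struct backend_ops {			/* models target_backend_ops */
		uint8_t transport_flags_default;
		uint8_t transport_flags_changeable;
	};

	struct device {				/* models se_device */
		const struct backend_ops *ops;
		uint8_t transport_flags;	/* mutable per-device copy */
	};

	static void device_init(struct device *dev, const struct backend_ops *ops)
	{
		dev->ops = ops;
		dev->transport_flags = ops->transport_flags_default;
	}

	/* A configfs-store-style setter: refuse bits the backend kept fixed. */
	static bool device_set_flag(struct device *dev, uint8_t flag, bool on)
	{
		if (!(dev->ops->transport_flags_changeable & flag))
			return false;	/* e.g. pscsi, which opens up nothing */
		if (on)
			dev->transport_flags |= flag;
		else
			dev->transport_flags &= ~flag;
		return true;
	}

	int main(void)
	{
		const struct backend_ops tcmu_like = {
			.transport_flags_default    = FLAG_PASSTHROUGH,
			.transport_flags_changeable = FLAG_PASSTHROUGH_PGR |
						      FLAG_PASSTHROUGH_ALUA,
		};
		struct device dev;

		device_init(&dev, &tcmu_like);
		printf("enable PGR passthrough: %s\n",
		       device_set_flag(&dev, FLAG_PASSTHROUGH_PGR, true) ? "ok" : "rejected");
		printf("clear base passthrough: %s\n",
		       device_set_flag(&dev, FLAG_PASSTHROUGH, false) ? "ok" : "rejected");
		printf("flags now: 0x%x\n", (unsigned)dev.transport_flags);
		return 0;
	}

Under this split pscsi keeps its three passthrough bits fixed (it declares no changeable mask), while tcmu opens up the PGR and ALUA bits so userspace handlers that cannot service those commands can hand them back to core emulation.
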
diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h
index b71f680968eb..5857cf682ee7 100644
--- a/include/trace/events/qla.h
+++ b/include/trace/events/qla.h
@@ -9,6 +9,11 @@
#define QLA_MSG_MAX 256
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+
DECLARE_EVENT_CLASS(qla_log_event,
TP_PROTO(const char *buf,
struct va_format *vaf),
@@ -27,6 +32,8 @@ DECLARE_EVENT_CLASS(qla_log_event,
TP_printk("%s %s", __get_str(buf), __get_str(msg))
);
+#pragma GCC diagnostic pop
+
DEFINE_EVENT(qla_log_event, ql_dbg_log,
TP_PROTO(const char *buf, struct va_format *vaf),
TP_ARGS(buf, vaf)