author    Linus Torvalds <torvalds@linux-foundation.org>  2018-08-15 22:06:26 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-08-15 22:06:26 -0700
commit    72f02ba66bd83b54054da20eae550123de84da6f (patch)
tree      96a8360400e040aa2e38e7352594dbbc485461db
parent    db06f826ec12bf0701ea7fc0a3c0aa00b84417c8 (diff)
parent    51372570ac3c919b036e760f4ca449e81cf8e995 (diff)
download  linux-72f02ba66bd83b54054da20eae550123de84da6f.tar.gz
          linux-72f02ba66bd83b54054da20eae550123de84da6f.tar.bz2
          linux-72f02ba66bd83b54054da20eae550123de84da6f.zip
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates to the usual drivers: mpt3sas, lpfc, qla2xxx,
  hisi_sas, smartpqi, megaraid_sas, arcmsr. In addition, with the
  continuing absence of Nic we have target updates for tcmu and target
  core (all with reviews and acks).

  The biggest observable change is going to be that we're (again)
  trying to switch to multiqueue as the default (a user can still
  override the setting on the kernel command line). Other major core
  stuff is the removal of the remaining Microchannel drivers, an update
  of the internal timers and some reworks of completion and result
  handling"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (203 commits)
  scsi: core: use blk_mq_run_hw_queues in scsi_kick_queue
  scsi: ufs: remove unnecessary query(DM) UPIU trace
  scsi: qla2xxx: Fix issue reported by static checker for qla2x00_els_dcmd2_sp_done()
  scsi: aacraid: Spelling fix in comment
  scsi: mpt3sas: Fix calltrace observed while running IO & reset
  scsi: aic94xx: fix an error code in aic94xx_init()
  scsi: st: remove redundant pointer STbuffer
  scsi: qla2xxx: Update driver version to 10.00.00.08-k
  scsi: qla2xxx: Migrate NVME N2N handling into state machine
  scsi: qla2xxx: Save frame payload size from ICB
  scsi: qla2xxx: Fix stalled relogin
  scsi: qla2xxx: Fix race between switch cmd completion and timeout
  scsi: qla2xxx: Fix Management Server NPort handle reservation logic
  scsi: qla2xxx: Flush mailbox commands on chip reset
  scsi: qla2xxx: Fix unintended Logout
  scsi: qla2xxx: Fix session state stuck in Get Port DB
  scsi: qla2xxx: Fix redundant fc_rport registration
  scsi: qla2xxx: Silent erroneous message
  scsi: qla2xxx: Prevent sysfs access when chip is down
  scsi: qla2xxx: Add longer window for chip reset
  ...
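The multiqueue default mentioned above is the CONFIG_SCSI_MQ_DEFAULT change in
drivers/scsi/Kconfig further down in this diff; a user can still override it at
boot through the scsi_mod.use_blk_mq module parameter. A minimal sketch of how
a Kconfig default of this kind typically seeds a boot-time-overridable module
parameter (illustrative only, not the exact upstream source):

    /* Sketch: CONFIG_SCSI_MQ_DEFAULT seeds the compiled-in default, while
     * the kernel command line (e.g. scsi_mod.use_blk_mq=0) can still flip
     * it at boot via the module parameter declared below.
     */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    #ifdef CONFIG_SCSI_MQ_DEFAULT
    bool scsi_use_blk_mq = true;    /* blk-mq is the default I/O path */
    #else
    bool scsi_use_blk_mq = false;   /* legacy request path stays default */
    #endif
    module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);

With a kernel built with CONFIG_SCSI_MQ_DEFAULT=y, booting with
scsi_mod.use_blk_mq=0 restores the legacy I/O path.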
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufs-hisi.txt | 41
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 10
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi3660.dtsi | 18
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  drivers/ata/libata-core.c | 3
-rw-r--r--  drivers/ata/libata.h | 2
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 11
-rw-r--r--  drivers/message/fusion/mptbase.c | 5
-rw-r--r--  drivers/message/fusion/mptctl.c | 4
-rw-r--r--  drivers/message/fusion/mptfc.c | 2
-rw-r--r--  drivers/message/fusion/mptsas.c | 1
-rw-r--r--  drivers/scsi/3w-9xxx.c | 6
-rw-r--r--  drivers/scsi/3w-sas.c | 3
-rw-r--r--  drivers/scsi/3w-xxxx.c | 4
-rw-r--r--  drivers/scsi/Kconfig | 33
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/NCR_D700.c | 405
-rw-r--r--  drivers/scsi/NCR_D700.h | 30
-rw-r--r--  drivers/scsi/NCR_Q720.c | 376
-rw-r--r--  drivers/scsi/NCR_Q720.h | 29
-rw-r--r--  drivers/scsi/a100u2w.c | 4
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 41
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/rx.c | 2
-rw-r--r--  drivers/scsi/aacraid/sa.c | 2
-rw-r--r--  drivers/scsi/aacraid/src.c | 6
-rw-r--r--  drivers/scsi/advansys.c | 10
-rw-r--r--  drivers/scsi/aha152x.c | 71
-rw-r--r--  drivers/scsi/aha1740.c | 9
-rw-r--r--  drivers/scsi/aha1740.h | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 7
-rw-r--r--  drivers/scsi/atp870u.c | 16
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 15
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 23
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 23
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 19
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 1
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/ch.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 115
-rw-r--r--  drivers/scsi/csiostor/csio_wr.c | 84
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.c | 5
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 6
-rw-r--r--  drivers/scsi/dc395x.c | 5
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 12
-rw-r--r--  drivers/scsi/gdth.c | 67
-rw-r--r--  drivers/scsi/gdth.h | 10
-rw-r--r--  drivers/scsi/gdth_proc.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 14
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 319
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 23
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 21
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 290
-rw-r--r--  drivers/scsi/hosts.c | 32
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 12
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 6
-rw-r--r--  drivers/scsi/imm.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 25
-rw-r--r--  drivers/scsi/ipr.h | 7
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 47
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 108
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 100
-rw-r--r--  drivers/scsi/libiscsi.c | 2
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 47
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 4
-rw-r--r--  drivers/scsi/lpfc/Makefile | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 462
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_compat.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 68
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 46
-rw-r--r--  drivers/scsi/lpfc/lpfc_ids.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 28
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 76
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 60
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 50
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h | 4
-rw-r--r--  drivers/scsi/megaraid.c | 29
-rw-r--r--  drivers/scsi/megaraid.h | 14
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 33
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 61
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 36
-rw-r--r--  drivers/scsi/mesh.c | 4
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 438
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 30
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 81
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 395
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 461
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 62
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 18
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_warpdrive.c | 3
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 10
-rw-r--r--  drivers/scsi/nsp32_debug.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_fw_api.c | 30
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 33
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 68
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 731
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 154
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 155
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 133
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 20
-rw-r--r--  drivers/scsi/qlogicpti.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 4
-rw-r--r--  drivers/scsi/scsi.h | 3
-rw-r--r--  drivers/scsi/scsi_debug.c | 57
-rw-r--r--  drivers/scsi/scsi_error.c | 6
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 405
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 22
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r--  drivers/scsi/scsi_typedefs.h | 2
-rw-r--r--  drivers/scsi/sd.c | 15
-rw-r--r--  drivers/scsi/sd_zbc.c | 6
-rw-r--r--  drivers/scsi/sg.c | 7
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 12
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 160
-rw-r--r--  drivers/scsi/snic/snic_debugfs.c | 10
-rw-r--r--  drivers/scsi/snic/snic_trc.c | 6
-rw-r--r--  drivers/scsi/st.c | 3
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_fw.c | 4
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.h | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 3
-rw-r--r--  drivers/scsi/ufs/Kconfig | 9
-rw-r--r--  drivers/scsi/ufs/Makefile | 1
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c | 619
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.h | 115
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 21
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 52
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 55
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 15
-rw-r--r--  drivers/target/sbp/sbp_target.c | 18
-rw-r--r--  drivers/target/target_core_configfs.c | 12
-rw-r--r--  drivers/target/target_core_device.c | 53
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 5
-rw-r--r--  drivers/target/target_core_internal.h | 2
-rw-r--r--  drivers/target/target_core_sbc.c | 7
-rw-r--r--  drivers/target/target_core_tmr.c | 30
-rw-r--r--  drivers/target/target_core_transport.c | 285
-rw-r--r--  drivers/target/target_core_ua.c | 43
-rw-r--r--  drivers/target/target_core_ua.h | 3
-rw-r--r--  drivers/target/target_core_user.c | 377
-rw-r--r--  drivers/target/target_core_xcopy.c | 5
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 10
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 5
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 5
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 19
-rw-r--r--  drivers/vhost/scsi.c | 16
-rw-r--r--  drivers/xen/xen-scsiback.c | 17
-rw-r--r--  fs/sysfs/file.c | 44
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/linux/percpu_ida.h | 83
-rw-r--r--  include/linux/sbitmap.h | 2
-rw-r--r--  include/linux/sysfs.h | 14
-rw-r--r--  include/scsi/libsas.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 1
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  include/target/target_core_backend.h | 6
-rw-r--r--  include/target/target_core_base.h | 16
-rw-r--r--  include/target/target_core_fabric.h | 10
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/klist.c | 10
-rw-r--r--  lib/percpu_ida.c | 370
212 files changed, 5294 insertions, 4660 deletions
diff --git a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
new file mode 100644
index 000000000000..a48c44817367
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
@@ -0,0 +1,41 @@
+* Hisilicon Universal Flash Storage (UFS) Host Controller
+
+UFS nodes are defined to describe on-chip UFS hardware macro.
+Each UFS Host Controller should have its own node.
+
+Required properties:
+- compatible : compatible list, contains one of the following -
+ "hisilicon,hi3660-ufs", "jedec,ufs-1.1" for hisi ufs
+ host controller present on Hi36xx chipset.
+- reg : should contain UFS register address space & UFS SYS CTRL register address,
+- interrupt-parent : interrupt device
+- interrupts : interrupt number
+- clocks : List of phandle and clock specifier pairs
+- clock-names : List of clock input name strings sorted in the same
+ order as the clocks property. "ref_clk", "phy_clk" is optional
+- freq-table-hz : Array of <min max> operating frequencies stored in the same
+ order as the clocks property. If this property is not
+ defined or a value in the array is "0" then it is assumed
+ that the frequency is set by the parent clock or a
+ fixed rate clock source.
+- resets : describe reset node register
+- reset-names : reset node register, the "rst" corresponds to reset the whole UFS IP.
+
+Example:
+
+ ufs: ufs@ff3b0000 {
+ compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "ref_clk", "phy_clk";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ resets = <&crg_rst 0x84 12>;
+ reset-names = "rst";
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index c39dfef76a18..2df00524bd21 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -41,6 +41,8 @@ Optional properties:
-lanes-per-direction : number of lanes available per direction - either 1 or 2.
Note that it is assume same number of lanes is used both
directions at once. If not specified, default is 2 lanes per direction.
+- resets : reset node register
+- reset-names : describe reset node register, the "rst" corresponds to reset the whole UFS IP.
Note: If above properties are not defined it can be assumed that the supply
regulators or clocks are always on.
@@ -61,9 +63,11 @@ Example:
vccq-max-microamp = 200000;
vccq2-max-microamp = 200000;
- clocks = <&core 0>, <&ref 0>, <&iface 0>;
- clock-names = "core_clk", "ref_clk", "iface_clk";
- freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
+ clocks = <&core 0>, <&ref 0>, <&phy 0>, <&iface 0>;
+ clock-names = "core_clk", "ref_clk", "phy_clk", "iface_clk";
+ freq-table-hz = <100000000 200000000>, <0 0>, <0 0>, <0 0>;
+ resets = <&reset 0 1>;
+ reset-names = "rst";
phys = <&ufsphy1>;
phy-names = "ufsphy";
};
diff --git a/MAINTAINERS b/MAINTAINERS
index 64ddcd5041ae..3d08725527aa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9847,12 +9847,6 @@ F: drivers/scsi/mac_scsi.*
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
-NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
-M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-F: drivers/scsi/NCR_D700.*
-
NCSI LIBRARY:
M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
S: Maintained
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 8d477dcbfa58..851190a719ea 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -1000,6 +1000,24 @@
reset-gpios = <&gpio11 1 0 >;
};
+ /* UFS */
+ ufs: ufs@ff3b0000 {
+ compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "ref_clk", "phy_clk";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ resets = <&crg_rst 0x84 12>;
+ reset-names = "rst";
+ };
+
/* SD */
dwmmc1: dwmmc1@ff37f000 {
#address-cells = <1>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f9a186f6af8a..2c07e233012b 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -193,6 +193,7 @@ CONFIG_SCSI_HISI_SAS=y
CONFIG_SCSI_HISI_SAS_PCI=y
CONFIG_SCSI_UFSHCD=m
CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_HISI=y
CONFIG_SCSI_UFS_QCOM=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cc71c63df381..984b37647b2f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6424,6 +6424,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->n_tags = ATA_MAX_QUEUE;
host->dev = dev;
host->ops = ops;
+ kref_init(&host->kref);
}
void __ata_port_probe(struct ata_port *ap)
@@ -7391,3 +7392,5 @@ EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
+EXPORT_SYMBOL_GPL(ata_host_get);
+EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 9e21c49cf6be..f953cb4bb1ba 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -100,8 +100,6 @@ extern int ata_port_probe(struct ata_port *ap);
extern void __ata_port_probe(struct ata_port *ap);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
-extern void ata_host_get(struct ata_host *host);
-extern void ata_host_put(struct ata_host *host);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3081c629a7f7..1ae638b58b63 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2029,8 +2029,7 @@ static void srpt_release_channel_work(struct work_struct *w)
target_sess_cmd_list_set_waiting(se_sess);
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(se_sess);
- transport_deregister_session(se_sess);
+ target_remove_session(se_sess);
ch->sess = NULL;
if (ch->using_rdma_cm)
@@ -2221,16 +2220,16 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
@@ -3597,11 +3596,9 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
/**
* srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* @wwn: Corresponds to $driver/$port.
- * @group: Not used.
* @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
const char *name)
{
struct srpt_port *sport = wwn->priv;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a625ac4e2872..e6b4ae558767 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -642,6 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
freereq = 0;
if (event != MPI_EVENT_EVENT_CHANGE)
break;
+ /* else: fall through */
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
@@ -1779,7 +1780,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
struct proc_dir_entry *dent;
#endif
- ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
+ ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL);
if (ioc == NULL) {
printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
return -ENOMEM;
@@ -1886,6 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
ioc->errata_flag_1064 = 1;
+ /* fall through */
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
@@ -1930,6 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
+ /* fall through */
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 4470630dd545..8d22d6134a89 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2514,8 +2514,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
if (mpt_config(ioc, &cfg) == 0) {
ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
if (strlen(pdata->BoardTracerNumber) > 1) {
- strncpy(karg.serial_number, pdata->BoardTracerNumber, 24);
- karg.serial_number[24-1]='\0';
+ strlcpy(karg.serial_number,
+ pdata->BoardTracerNumber, 24);
}
}
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 06b175420be9..b15fdc626fb8 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1292,7 +1292,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
- ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
goto out_mptfc_probe;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 76a66da33996..b8cf2658649e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4327,6 +4327,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
}
}
mpt_findImVolumes(ioc);
+ /* fall through */
case MPTSAS_ADD_DEVICE:
memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 99ba4a770406..27521fc3ef5a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twa_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_DISABLE_INTERRUPTS(tw_dev);
/* Initialize the card */
- if (twa_reset_sequence(tw_dev, 0))
+ if (twa_reset_sequence(tw_dev, 0)) {
+ retval = -ENOMEM;
goto out_iounmap;
+ }
/* Set host specific parameters */
if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index cf9f2a09b47d..40c1e6e64f58 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twl_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_iomap(pdev, 1, 0);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+ retval = -ENOMEM;
goto out_iounmap;
}
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f6179e3d6953..471366945bd4 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1925,7 +1925,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
if (test_bit(TW_IN_RESET, &tw_dev->flags))
return SCSI_MLQUEUE_HOST_BUSY;
- /* Save done function into Scsi_Cmnd struct */
+ /* Save done function into struct scsi_cmnd */
SCpnt->scsi_done = done;
/* Queue the command and get a request id */
@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (tw_initialize_device_extension(tw_dev)) {
printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_resource_start(pdev, 0);
if (!tw_dev->base_addr) {
printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 35c909bbf8ba..8fc851a9e116 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -49,6 +49,7 @@ config SCSI_NETLINK
config SCSI_MQ_DEFAULT
bool "SCSI: use blk-mq I/O path by default"
+ default y
depends on SCSI
---help---
This option enables the new blk-mq based I/O path for SCSI
@@ -841,18 +842,6 @@ config SCSI_IZIP_SLOW_CTR
Generally, saying N is fine.
-config SCSI_NCR_D700
- tristate "NCR Dual 700 MCA SCSI support"
- depends on MCA && SCSI
- select SCSI_SPI_ATTRS
- help
- This is a driver for the MicroChannel Dual 700 card produced by
- NCR and commonly used in 345x/35xx/4100 class machines. It always
- tries to negotiate sync and uses tag command queueing.
-
- Unless you have an NCR manufactured machine, the chances are that
- you do not have this SCSI card, so say N.
-
config SCSI_LASI700
tristate "HP Lasi SCSI support for 53c700/710"
depends on GSC && SCSI
@@ -1000,21 +989,9 @@ config SCSI_ZALON
used on the add-in Bluefish, Barracuda & Shrike SCSI cards.
Say Y here if you have one of these machines or cards.
-config SCSI_NCR_Q720
- tristate "NCR Quad 720 MCA SCSI support"
- depends on MCA && SCSI
- select SCSI_SPI_ATTRS
- help
- This is a driver for the MicroChannel Quad 720 card produced by
- NCR and commonly used in 345x/35xx/4100 class machines. It always
- tries to negotiate sync and uses tag command queueing.
-
- Unless you have an NCR manufactured machine, the chances are that
- you do not have this SCSI card, so say N.
-
config SCSI_NCR53C8XX_DEFAULT_TAGS
int "default tagged command queue depth"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "8"
---help---
"Tagged command queuing" is a feature of SCSI-2 which improves
@@ -1040,7 +1017,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS
config SCSI_NCR53C8XX_MAX_TAGS
int "maximum number of queued commands"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "32"
---help---
This option allows you to specify the maximum number of commands
@@ -1057,7 +1034,7 @@ config SCSI_NCR53C8XX_MAX_TAGS
config SCSI_NCR53C8XX_SYNC
int "synchronous transfers frequency in MHz"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "20"
---help---
The SCSI Parallel Interface-2 Standard defines 5 classes of transfer
@@ -1091,7 +1068,7 @@ config SCSI_NCR53C8XX_SYNC
config SCSI_NCR53C8XX_NO_DISCONNECT
bool "not allow targets to disconnect"
- depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
+ depends on SCSI_ZALON && SCSI_NCR53C8XX_DEFAULT_TAGS=0
help
This option is only provided for safety if you suspect some SCSI
device of yours to not support properly the target-disconnect
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 768953881c9e..6d71b2a9592b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -77,8 +77,6 @@ obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
-obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
@@ -180,7 +178,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
-DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
-NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
deleted file mode 100644
index b39a2409a507..000000000000
--- a/drivers/scsi/NCR_D700.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Dual 700 MCA SCSI Driver
- *
- * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-
-/* Notes:
- *
- * Most of the work is done in the chip specific module, 53c700.o
- *
- * TODO List:
- *
- * 1. Extract the SCSI ID from the voyager CMOS table (necessary to
- * support multi-host environments.
- *
- * */
-
-
-/* CHANGELOG
- *
- * Version 2.2
- *
- * Added mca_set_adapter_name().
- *
- * Version 2.1
- *
- * Modularise the driver into a Board piece (this file) and a chip
- * piece 53c700.[ch] and 53c700.scr, added module options. You can
- * now specify the scsi id by the parameters
- *
- * NCR_D700=slot:<n> [siop:<n>] id:<n> ....
- *
- * They need to be comma separated if compiled into the kernel
- *
- * Version 2.0
- *
- * Initial implementation of TCQ (Tag Command Queueing). TCQ is full
- * featured and uses the clock algorithm to keep track of outstanding
- * tags and guard against individual tag starvation. Also fixed a bug
- * in all of the 1.x versions where the D700_data_residue() function
- * was returning results off by 32 bytes (and thus causing the same 32
- * bytes to be written twice corrupting the data block). It turns out
- * the 53c700 only has a 6 bit DBC and DFIFO registers not 7 bit ones
- * like the 53c710 (The 710 is the only data manual still available,
- * which I'd been using to program the 700).
- *
- * Version 1.2
- *
- * Much improved message handling engine
- *
- * Version 1.1
- *
- * Add code to handle selection reasonably correctly. By the time we
- * get the selection interrupt, we've already responded, but drop off the
- * bus and hope the selector will go away.
- *
- * Version 1.0:
- *
- * Initial release. Fully functional except for procfs and tag
- * command queueing. Has only been tested on cards with 53c700-66
- * chips and only single ended. Features are
- *
- * 1. Synchronous data transfers to offset 8 (limit of 700-66) and
- * 100ns (10MHz) limit of SCSI-2
- *
- * 2. Disconnection and reselection
- *
- * Testing:
- *
- * I've only really tested this with the 700-66 chip, but have done
- * soak tests in multi-device environments to verify that
- * disconnections and reselections are being processed correctly.
- * */
-
-#define NCR_D700_VERSION "2.2"
-
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mca.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_transport.h>
-#include <scsi/scsi_transport_spi.h>
-
-#include "53c700.h"
-#include "NCR_D700.h"
-
-static char *NCR_D700; /* command line from insmod */
-
-MODULE_AUTHOR("James Bottomley");
-MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
-MODULE_LICENSE("GPL");
-module_param(NCR_D700, charp, 0);
-
-static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] =
- { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
-
-#ifdef MODULE
-#define ARG_SEP ' '
-#else
-#define ARG_SEP ','
-#endif
-
-static int __init
-param_setup(char *string)
-{
- char *pos = string, *next;
- int slot = -1, siop = -1;
-
- while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
- int val = (int)simple_strtoul(++next, NULL, 0);
-
- if(!strncmp(pos, "slot:", 5))
- slot = val;
- else if(!strncmp(pos, "siop:", 5))
- siop = val;
- else if(!strncmp(pos, "id:", 3)) {
- if(slot == -1) {
- printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n");
- } else if(slot > MCA_MAX_SLOT_NR) {
- printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val);
- } else {
- if(siop != 0 && siop != 1) {
- id_array[slot*2] = val;
- id_array[slot*2 + 1] =val;
- } else {
- id_array[slot*2 + siop] = val;
- }
- }
- }
- if((pos = strchr(pos, ARG_SEP)) != NULL)
- pos++;
- }
- return 1;
-}
-
-/* Host template. The 53c700 routine NCR_700_detect will
- * fill in all of the missing routines */
-static struct scsi_host_template NCR_D700_driver_template = {
- .module = THIS_MODULE,
- .name = "NCR Dual 700 MCA",
- .proc_name = "NCR_D700",
- .this_id = 7,
-};
-
-/* We needs this helper because we have two hosts per struct device */
-struct NCR_D700_private {
- struct device *dev;
- struct Scsi_Host *hosts[2];
- char name[30];
- char pad;
-};
-
-static int
-NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
- int slot, u32 region, int differential)
-{
- struct NCR_700_Host_Parameters *hostdata;
- struct Scsi_Host *host;
- int ret;
-
- hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
- "data, detatching\n", siop);
- return -ENOMEM;
- }
-
- if (!request_region(region, 64, "NCR_D700")) {
- printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
- region);
- ret = -ENODEV;
- goto region_failed;
- }
-
- /* Fill in the three required pieces of hostdata */
- hostdata->base = ioport_map(region, 64);
- hostdata->differential = (((1<<siop) & differential) != 0);
- hostdata->clock = NCR_D700_CLOCK_MHZ;
- hostdata->burst_length = 8;
-
- /* and register the siop */
- host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
- if (!host) {
- ret = -ENOMEM;
- goto detect_failed;
- }
-
- p->hosts[siop] = host;
- /* FIXME: read this from SUS */
- host->this_id = id_array[slot * 2 + siop];
- host->irq = irq;
- host->base = region;
- scsi_scan_host(host);
-
- return 0;
-
- detect_failed:
- release_region(region, 64);
- region_failed:
- kfree(hostdata);
-
- return ret;
-}
-
-static irqreturn_t
-NCR_D700_intr(int irq, void *data)
-{
- struct NCR_D700_private *p = (struct NCR_D700_private *)data;
- int i, found = 0;
-
- for (i = 0; i < 2; i++)
- if (p->hosts[i] &&
- NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED)
- found++;
-
- return found ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/* Detect a D700 card. Note, because of the setup --- the chips are
- * essentially connectecd to the MCA bus independently, it is easier
- * to set them up as two separate host adapters, rather than one
- * adapter with two channels */
-static int
-NCR_D700_probe(struct device *dev)
-{
- struct NCR_D700_private *p;
- int differential;
- static int banner = 1;
- struct mca_device *mca_dev = to_mca_device(dev);
- int slot = mca_dev->slot;
- int found = 0;
- int irq, i;
- int pos3j, pos3k, pos3a, pos3b, pos4;
- __u32 base_addr, offset_addr;
-
- /* enable board interrupt */
- pos4 = mca_device_read_pos(mca_dev, 4);
- pos4 |= 0x4;
- mca_device_write_pos(mca_dev, 4, pos4);
-
- mca_device_write_pos(mca_dev, 6, 9);
- pos3j = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 10);
- pos3k = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 0);
- pos3a = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 1);
- pos3b = mca_device_read_pos(mca_dev, 3);
-
- base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0;
- offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70;
-
- irq = (pos4 & 0x3) + 11;
- if(irq >= 13)
- irq++;
- if(banner) {
- printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n"
- "NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n"
- "NCR D700:\n");
- banner = 0;
- }
- /* now do the bus related transforms */
- irq = mca_device_transform_irq(mca_dev, irq);
- base_addr = mca_device_transform_ioport(mca_dev, base_addr);
- offset_addr = mca_device_transform_ioport(mca_dev, offset_addr);
-
- printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr);
-
- /*outb(BOARD_RESET, base_addr);*/
-
- /* clear any pending interrupts */
- (void)inb(base_addr + 0x08);
- /* get modctl, used later for setting diff bits */
- switch(differential = (inb(base_addr + 0x08) >> 6)) {
- case 0x00:
- /* only SIOP1 differential */
- differential = 0x02;
- break;
- case 0x01:
- /* Both SIOPs differential */
- differential = 0x03;
- break;
- case 0x03:
- /* No SIOPs differential */
- differential = 0x00;
- break;
- default:
- printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n",
- differential);
- differential = 0x00;
- break;
- }
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->dev = dev;
- snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
- if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
- printk(KERN_ERR "D700: request_irq failed\n");
- kfree(p);
- return -EBUSY;
- }
- /* plumb in both 700 chips */
- for (i = 0; i < 2; i++) {
- int err;
-
- if ((err = NCR_D700_probe_one(p, i, irq, slot,
- offset_addr + (0x80 * i),
- differential)) != 0)
- printk("D700: SIOP%d: probe failed, error = %d\n",
- i, err);
- else
- found++;
- }
-
- if (!found) {
- kfree(p);
- return -ENODEV;
- }
-
- mca_device_set_claim(mca_dev, 1);
- mca_device_set_name(mca_dev, "NCR_D700");
- dev_set_drvdata(dev, p);
- return 0;
-}
-
-static void
-NCR_D700_remove_one(struct Scsi_Host *host)
-{
- scsi_remove_host(host);
- NCR_700_release(host);
- kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]);
- free_irq(host->irq, host);
- release_region(host->base, 64);
-}
-
-static int
-NCR_D700_remove(struct device *dev)
-{
- struct NCR_D700_private *p = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < 2; i++)
- NCR_D700_remove_one(p->hosts[i]);
-
- kfree(p);
- return 0;
-}
-
-static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 };
-
-static struct mca_driver NCR_D700_driver = {
- .id_table = NCR_D700_id_table,
- .driver = {
- .name = "NCR_D700",
- .bus = &mca_bus_type,
- .probe = NCR_D700_probe,
- .remove = NCR_D700_remove,
- },
-};
-
-static int __init NCR_D700_init(void)
-{
-#ifdef MODULE
- if (NCR_D700)
- param_setup(NCR_D700);
-#endif
-
- return mca_register_driver(&NCR_D700_driver);
-}
-
-static void __exit NCR_D700_exit(void)
-{
- mca_unregister_driver(&NCR_D700_driver);
-}
-
-module_init(NCR_D700_init);
-module_exit(NCR_D700_exit);
-
-__setup("NCR_D700=", param_setup);
diff --git a/drivers/scsi/NCR_D700.h b/drivers/scsi/NCR_D700.h
deleted file mode 100644
index eb675d782ef6..000000000000
--- a/drivers/scsi/NCR_D700.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Dual 700 MCA SCSI Driver
- *
- * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
- */
-
-#ifndef _NCR_D700_H
-#define _NCR_D700_H
-
-/* Don't turn on debugging messages */
-#undef NCR_D700_DEBUG
-
-/* The MCA identifier */
-#define NCR_D700_MCA_ID 0x0092
-
-/* Defines for the Board registers */
-#define BOARD_RESET 0x80 /* board level reset */
-#define ADD_PARENB 0x04 /* Address Parity Enabled */
-#define DAT_PARENB 0x01 /* Data Parity Enabled */
-#define SFBK_ENB 0x10 /* SFDBK Interrupt Enabled */
-#define LED0GREEN 0x20 /* Led 0 (red 0; green 1) */
-#define LED1GREEN 0x40 /* Led 1 (red 0; green 1) */
-#define LED0RED 0xDF /* Led 0 (red 0; green 1) */
-#define LED1RED 0xBF /* Led 1 (red 0; green 1) */
-
-#define NCR_D700_CLOCK_MHZ 50
-
-#endif
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
deleted file mode 100644
index 54e7d26908ee..000000000000
--- a/drivers/scsi/NCR_Q720.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Quad 720 MCA SCSI Driver
- *
- * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
- */
-
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mca.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-#include "ncr53c8xx.h"
-
-#include "NCR_Q720.h"
-
-static struct ncr_chip q720_chip __initdata = {
- .revision_id = 0x0f,
- .burst_max = 3,
- .offset_max = 8,
- .nr_divisor = 4,
- .features = FE_WIDE | FE_DIFF | FE_VARCLK,
-};
-
-MODULE_AUTHOR("James Bottomley");
-MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver");
-MODULE_LICENSE("GPL");
-
-#define NCR_Q720_VERSION "0.9"
-
-/* We needs this helper because we have up to four hosts per struct device */
-struct NCR_Q720_private {
- struct device *dev;
- void __iomem * mem_base;
- __u32 phys_mem_base;
- __u32 mem_size;
- __u8 irq;
- __u8 siops;
- __u8 irq_enable;
- struct Scsi_Host *hosts[4];
-};
-
-static struct scsi_host_template NCR_Q720_tpnt = {
- .module = THIS_MODULE,
- .proc_name = "NCR_Q720",
-};
-
-static irqreturn_t
-NCR_Q720_intr(int irq, void *data)
-{
- struct NCR_Q720_private *p = (struct NCR_Q720_private *)data;
- __u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4;
- __u8 siop;
-
- sir |= ~p->irq_enable;
-
- if(sir == 0xff)
- return IRQ_NONE;
-
-
- while((siop = ffz(sir)) < p->siops) {
- sir |= 1<<siop;
- ncr53c8xx_intr(irq, p->hosts[siop]);
- }
- return IRQ_HANDLED;
-}
-
-static int __init
-NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop,
- int irq, int slot, __u32 paddr, void __iomem *vaddr)
-{
- struct ncr_device device;
- __u8 scsi_id;
- static int unit = 0;
- __u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
- __u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20;
- __u8 version;
- int error;
-
- scsi_id = scsr1 >> 4;
- /* enable burst length 16 (FIXME: should allow this) */
- scsr1 |= 0x02;
- /* force a siop reset */
- scsr1 |= 0x04;
- writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
- udelay(10);
- version = readb(vaddr + 0x18) >> 4;
-
- memset(&device, 0, sizeof(struct ncr_device));
- /* Initialise ncr_device structure with items required by ncr_attach. */
- device.chip = q720_chip;
- device.chip.revision_id = version;
- device.host_id = scsi_id;
- device.dev = p->dev;
- device.slot.base = paddr;
- device.slot.base_c = paddr;
- device.slot.base_v = vaddr;
- device.slot.irq = irq;
- device.differential = differential ? 2 : 0;
- printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop,
- (unsigned long)paddr, differential, version);
-
- p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device);
-
- if (!p->hosts[siop])
- goto fail;
-
- p->irq_enable |= (1<<siop);
- scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
- /* clear the disable interrupt bit */
- scsr1 &= ~0x01;
- writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
-
- error = scsi_add_host(p->hosts[siop], p->dev);
- if (error)
- ncr53c8xx_release(p->hosts[siop]);
- else
- scsi_scan_host(p->hosts[siop]);
- return error;
-
- fail:
- return -ENODEV;
-}
-
-/* Detect a Q720 card. Note, because of the setup --- the chips are
- * essentially connectecd to the MCA bus independently, it is easier
- * to set them up as two separate host adapters, rather than one
- * adapter with two channels */
-static int __init
-NCR_Q720_probe(struct device *dev)
-{
- struct NCR_Q720_private *p;
- static int banner = 1;
- struct mca_device *mca_dev = to_mca_device(dev);
- int slot = mca_dev->slot;
- int found = 0;
- int irq, i, siops;
- __u8 pos2, pos4, asr2, asr9, asr10;
- __u16 io_base;
- __u32 base_addr, mem_size;
- void __iomem *mem_base;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- pos2 = mca_device_read_pos(mca_dev, 2);
- /* enable device */
- pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
- mca_device_write_pos(mca_dev, 2, pos2);
-
- io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT;
-
-
- if(banner) {
- printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n"
- "NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n"
- "NCR Q720:\n");
- banner = 0;
- }
- io_base = mca_device_transform_ioport(mca_dev, io_base);
-
- /* OK, this is phase one of the bootstrap, we now know the
- * I/O space base address. All the configuration registers
- * are mapped here (including pos) */
-
- /* sanity check I/O mapping */
- i = inb(io_base) | (inb(io_base+1)<<8);
- if(i != NCR_Q720_MCA_ID) {
- printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i);
- kfree(p);
- return -ENODEV;
- }
-
- /* Phase II, find the ram base and memory map the board register */
- pos4 = inb(io_base + 4);
- /* enable streaming data */
- pos4 |= 0x01;
- outb(pos4, io_base + 4);
- base_addr = (pos4 & 0x7e) << 20;
- base_addr += (pos4 & 0x80) << 23;
- asr10 = inb(io_base + 0x12);
- base_addr += (asr10 & 0x80) << 24;
- base_addr += (asr10 & 0x70) << 23;
-
- /* OK, got the base addr, now we need to find the ram size,
- * enable and map it */
- asr9 = inb(io_base + 0x11);
- i = (asr9 & 0xc0) >> 6;
- if(i == 0)
- mem_size = 1024;
- else
- mem_size = 1 << (19 + i);
-
- /* enable the sram mapping */
- asr9 |= 0x20;
-
- /* disable the rom mapping */
- asr9 &= ~0x10;
-
- outb(asr9, io_base + 0x11);
-
- if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) {
- printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx\n-0x%lx",
- (unsigned long)base_addr,
- (unsigned long)(base_addr + mem_size));
- goto out_free;
- }
-
- if (dma_declare_coherent_memory(dev, base_addr, base_addr,
- mem_size, 0)) {
- printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
- goto out_release_region;
- }
-
- /* The first 1k of the memory buffer is a memory map of the registers
- */
- mem_base = dma_mark_declared_memory_occupied(dev, base_addr,
- 1024);
- if (IS_ERR(mem_base)) {
- printk("NCR_Q720 failed to reserve memory mapped region\n");
- goto out_release;
- }
-
- /* now also enable accesses in asr 2 */
- asr2 = inb(io_base + 0x0a);
-
- asr2 |= 0x01;
-
- outb(asr2, io_base + 0x0a);
-
- /* get the number of SIOPs (this should be 2 or 4) */
- siops = ((asr2 & 0xe0) >> 5) + 1;
-
- /* sanity check mapping (again) */
- i = readw(mem_base);
- if(i != NCR_Q720_MCA_ID) {
- printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i);
- goto out_release;
- }
-
- irq = readb(mem_base + 5) & 0x0f;
-
-
- /* now do the bus related transforms */
- irq = mca_device_transform_irq(mca_dev, irq);
-
- printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops);
- printk(KERN_NOTICE "NCR Q720: On board ram %dk\n", mem_size/1024);
-
- p->dev = dev;
- p->mem_base = mem_base;
- p->phys_mem_base = base_addr;
- p->mem_size = mem_size;
- p->irq = irq;
- p->siops = siops;
-
- if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
- printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
- goto out_release;
- }
- /* disable all the siop interrupts */
- for(i = 0; i < siops; i++) {
- void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1;
- __u8 scsr1 = readb(reg_scsr1);
- scsr1 |= 0x01;
- writeb(scsr1, reg_scsr1);
- }
-
- /* plumb in all 720 chips */
- for (i = 0; i < siops; i++) {
- void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- __u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- __u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- int err;
-
- outb(0xff, port + 0x40);
- outb(0x07, port + 0x41);
- if ((err = NCR_Q720_probe_one(p, i, irq, slot,
- siop_p_base, siop_v_base)) != 0)
- printk("Q720: SIOP%d: probe failed, error = %d\n",
- i, err);
- else
- found++;
- }
-
- if (!found) {
- kfree(p);
- return -ENODEV;
- }
-
- mca_device_set_claim(mca_dev, 1);
- mca_device_set_name(mca_dev, "NCR_Q720");
- dev_set_drvdata(dev, p);
-
- return 0;
-
- out_release:
- dma_release_declared_memory(dev);
- out_release_region:
- release_mem_region(base_addr, mem_size);
- out_free:
- kfree(p);
-
- return -ENODEV;
-}
-
-static void __exit
-NCR_Q720_remove_one(struct Scsi_Host *host)
-{
- scsi_remove_host(host);
- ncr53c8xx_release(host);
-}
-
-static int __exit
-NCR_Q720_remove(struct device *dev)
-{
- struct NCR_Q720_private *p = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < p->siops; i++)
- if(p->hosts[i])
- NCR_Q720_remove_one(p->hosts[i]);
-
- dma_release_declared_memory(dev);
- release_mem_region(p->phys_mem_base, p->mem_size);
- free_irq(p->irq, p);
- kfree(p);
- return 0;
-}
-
-static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
-
-static struct mca_driver NCR_Q720_driver = {
- .id_table = NCR_Q720_id_table,
- .driver = {
- .name = "NCR_Q720",
- .bus = &mca_bus_type,
- .probe = NCR_Q720_probe,
- .remove = NCR_Q720_remove,
- },
-};
-
-static int __init
-NCR_Q720_init(void)
-{
- int ret = ncr53c8xx_init();
- if (!ret)
- ret = mca_register_driver(&NCR_Q720_driver);
- if (ret)
- ncr53c8xx_exit();
- return ret;
-}
-
-static void __exit
-NCR_Q720_exit(void)
-{
- mca_unregister_driver(&NCR_Q720_driver);
- ncr53c8xx_exit();
-}
-
-module_init(NCR_Q720_init);
-module_exit(NCR_Q720_exit);
diff --git a/drivers/scsi/NCR_Q720.h b/drivers/scsi/NCR_Q720.h
deleted file mode 100644
index d5f46cdb736e..000000000000
--- a/drivers/scsi/NCR_Q720.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Quad 720 MCA SCSI Driver
- *
- * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
- */
-
-#ifndef _NCR_Q720_H
-#define _NCR_Q720_H
-
-/* The MCA identifier */
-#define NCR_Q720_MCA_ID 0x0720
-
-#define NCR_Q720_CLOCK_MHZ 30
-
-#define NCR_Q720_POS2_BOARD_ENABLE 0x01
-#define NCR_Q720_POS2_INTERRUPT_ENABLE 0x02
-#define NCR_Q720_POS2_PARITY_DISABLE 0x04
-#define NCR_Q720_POS2_IO_MASK 0xf8
-#define NCR_Q720_POS2_IO_SHIFT 8
-
-#define NCR_Q720_CHIP_REGISTER_OFFSET 0x200
-#define NCR_Q720_SCSR_OFFSET 0x070
-#define NCR_Q720_SIOP_SHIFT 0x080
-
-#endif
-
-
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index b2942ec3d455..23b17621b6d2 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -143,7 +143,7 @@ static u8 wait_chip_ready(struct orc_host * host)
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
return 1;
- mdelay(100);
+ msleep(100);
}
return 0;
}
@@ -155,7 +155,7 @@ static u8 wait_firmware_ready(struct orc_host * host)
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
return 1;
- mdelay(100); /* wait 100ms before try again */
+ msleep(100); /* wait 100ms before try again */
}
return 0;
}
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a57f3a7d4748..6e356325d8d9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -115,8 +115,6 @@
#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
#define ASENCODE_OVERLAPPED_COMMAND 0x00
-#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
-
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
@@ -2961,7 +2959,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case SYNCHRONIZE_CACHE:
if (((aac_cache & 6) == 6) && dev->cache_protected) {
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
/* Issue FIB to tell Firmware to flush it's cache */
@@ -2989,7 +2988,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x80) {
/* unit serial number page */
arr[3] = setinqserial(dev, &arr[4],
@@ -3000,7 +3001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
char *sno = (char *)&inq_data;
@@ -3009,7 +3012,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else {
/* vpd page not implemented */
scsicmd->result = DID_OK << 16 |
@@ -3040,7 +3045,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
if (dev->in_reset)
@@ -3089,7 +3095,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
@@ -3115,7 +3122,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
@@ -3194,7 +3202,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd,
mode_buf_length);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
case MODE_SENSE_10:
@@ -3271,7 +3280,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(char *)&mpd10,
mode_buf_length);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
case REQUEST_SENSE:
@@ -3280,7 +3290,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
sizeof(struct sense_data));
memset(&dev->fsa_dev[cid].sense_data, 0,
sizeof(struct sense_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
case ALLOW_MEDIUM_REMOVAL:
@@ -3290,7 +3301,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
else
fsa_dev_ptr[cid].locked = 0;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
/*
* These commands are all No-Ops
@@ -3314,7 +3326,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
case START_STOP:
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d62ddd63f4fe..6e1b022a823d 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -514,7 +514,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
* The only invalid cases are if the caller requests to wait and
* does not request a response and if the caller does not want a
* response and the Fib is not allocated from pool. If a response
- * is not requesed the Fib will just be deallocaed by the DPC
+ * is not requested the Fib will just be deallocated by the DPC
* routine when the response comes back from the adapter. No
* further processing will be done besides deleting the Fib. We
* will have a debug mode where the adapter can notify the host
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 417ba349e10e..ddc69738375f 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -65,7 +65,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
- * back to the system. If no response was requesed we just
+ * back to the system. If no response was requested we just
* deallocate the Fib here and continue.
*/
while(aac_consumer_get(dev, q, &entry))
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 620166694171..576cdf9cc120 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -319,7 +319,7 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
union aac_init *init;
init = dev->init;
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
// We can only use a 32 bit address here
rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 882f40353b96..efa96c1c6aa3 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -251,7 +251,7 @@ static void aac_sa_start_adapter(struct aac_dev *dev)
* Fill in the remaining pieces of the init.
*/
init = dev->init;
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 4ebb35a29caa..7a51ccfa8662 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -409,7 +409,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)
init = dev->init;
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
- init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r8.host_elapsed_seconds =
+ cpu_to_le32(ktime_get_real_seconds());
src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
lower_32_bits(dev->init_pa),
upper_32_bits(dev->init_pa),
@@ -417,7 +418,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)
(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
0, 0, 0, NULL, NULL, NULL, NULL, NULL);
} else {
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds =
+ cpu_to_le32(ktime_get_real_seconds());
// We can only use a 32 bit address here
src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
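
The rx.c, sa.c and src.c hunks above are part of the year-2038 cleanup: get_seconds() returns an unsigned long, which is only 32 bits wide on 32-bit architectures, while ktime_get_real_seconds() returns a 64-bit time64_t. The adapter field itself stays 32 bits via cpu_to_le32(), so the stored value is still deliberately truncated. A rough userspace analogy of that truncation (standard C, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        /* time(NULL) stands in for ktime_get_real_seconds() here */
        int64_t seconds64 = (int64_t)time(NULL);
        /* what cpu_to_le32() ultimately stores in host_elapsed_seconds */
        uint32_t fw_field = (uint32_t)seconds64;

        printf("64-bit seconds: %lld, 32-bit field: %u\n",
               (long long)seconds64, fw_field);
        return 0;
}
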
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 24e57e770432..713f69033f20 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2416,8 +2416,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %u, host_no %d,\n",
- atomic_read(&s->host_busy), s->host_no);
+ printk(" host_busy %d, host_no %d,\n",
+ scsi_host_busy(s), s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3182,8 +3182,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n",
- atomic_read(&shost->host_busy), shost->max_id,
+ " host_busy %d, max_id %u, max_lun %llu, max_channel %u\n",
+ scsi_host_busy(shost), shost->max_id,
shost->max_lun, shost->max_channel);
seq_printf(m,
@@ -8466,7 +8466,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp)
}
/*
- * Execute a single 'Scsi_Cmnd'.
+ * Execute a single 'struct scsi_cmnd'.
*/
static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
{
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index bc0058df31c6..4d7b0e0adbf7 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -422,16 +422,16 @@ enum aha152x_state {
*
*/
struct aha152x_hostdata {
- Scsi_Cmnd *issue_SC;
+ struct scsi_cmnd *issue_SC;
/* pending commands to issue */
- Scsi_Cmnd *current_SC;
+ struct scsi_cmnd *current_SC;
/* current command on the bus */
- Scsi_Cmnd *disconnected_SC;
+ struct scsi_cmnd *disconnected_SC;
/* commands that disconnected */
- Scsi_Cmnd *done_SC;
+ struct scsi_cmnd *done_SC;
/* command that was completed */
spinlock_t lock;
@@ -510,7 +510,7 @@ struct aha152x_hostdata {
*
*/
struct aha152x_scdata {
- Scsi_Cmnd *next; /* next sc in queue */
+ struct scsi_cmnd *next; /* next sc in queue */
struct completion *done;/* semaphore to block on */
struct scsi_eh_save ses;
};
@@ -633,7 +633,7 @@ static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
static void done(struct Scsi_Host *shpnt, int error);
/* diagnostics */
-static void show_command(Scsi_Cmnd * ptr);
+static void show_command(struct scsi_cmnd * ptr);
static void show_queues(struct Scsi_Host *shpnt);
static void disp_enintr(struct Scsi_Host *shpnt);
@@ -642,9 +642,9 @@ static void disp_enintr(struct Scsi_Host *shpnt);
* queue services:
*
*/
-static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
{
- Scsi_Cmnd *end;
+ struct scsi_cmnd *end;
SCNEXT(new_SC) = NULL;
if (!*SC)
@@ -656,9 +656,9 @@ static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
}
}
-static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
+static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd ** SC)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
ptr = *SC;
if (ptr) {
@@ -668,9 +668,10 @@ static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
return ptr;
}
-static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
+static inline struct scsi_cmnd *remove_lun_SC(struct scsi_cmnd ** SC,
+ int target, int lun)
{
- Scsi_Cmnd *ptr, *prev;
+ struct scsi_cmnd *ptr, *prev;
for (ptr = *SC, prev = NULL;
ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
@@ -689,9 +690,10 @@ static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
return ptr;
}
-static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
+static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC,
+ struct scsi_cmnd *SCp)
{
- Scsi_Cmnd *ptr, *prev;
+ struct scsi_cmnd *ptr, *prev;
for (ptr = *SC, prev = NULL;
ptr && SCp!=ptr;
@@ -912,8 +914,9 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
/*
* Queue a command and setup interrupts for a free bus.
*/
-static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
- int phase, void (*done)(Scsi_Cmnd *))
+static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
+ struct completion *complete,
+ int phase, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
@@ -987,7 +990,8 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
* queue a command
*
*/
-static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha152x_queue_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
return aha152x_internal_queue(SCpnt, NULL, 0, done);
}
@@ -998,7 +1002,7 @@ static DEF_SCSI_QCMD(aha152x_queue)
/*
*
*/
-static void reset_done(Scsi_Cmnd *SCpnt)
+static void reset_done(struct scsi_cmnd *SCpnt)
{
if(SCSEM(SCpnt)) {
complete(SCSEM(SCpnt));
@@ -1011,10 +1015,10 @@ static void reset_done(Scsi_Cmnd *SCpnt)
* Abort a command
*
*/
-static int aha152x_abort(Scsi_Cmnd *SCpnt)
+static int aha152x_abort(struct scsi_cmnd *SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
DO_LOCK(flags);
@@ -1052,7 +1056,7 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
* Reset a device
*
*/
-static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
+static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
DECLARE_COMPLETION(done);
@@ -1110,13 +1114,14 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
return ret;
}
-static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
+static void free_hard_reset_SCs(struct Scsi_Host *shpnt,
+ struct scsi_cmnd **SCs)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
ptr=*SCs;
while(ptr) {
- Scsi_Cmnd *next;
+ struct scsi_cmnd *next;
if(SCDATA(ptr)) {
next = SCNEXT(ptr);
@@ -1171,7 +1176,7 @@ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
* Reset the bus
*
*/
-static int aha152x_bus_reset(Scsi_Cmnd *SCpnt)
+static int aha152x_bus_reset(struct scsi_cmnd *SCpnt)
{
return aha152x_bus_reset_host(SCpnt->device->host);
}
@@ -1436,7 +1441,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
if(!(DONE_SC->SCp.phase & not_issued)) {
struct aha152x_scdata *sc;
- Scsi_Cmnd *ptr = DONE_SC;
+ struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
sc = SCDATA(ptr);
@@ -1451,7 +1456,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
}
if(DONE_SC && DONE_SC->scsi_done) {
- Scsi_Cmnd *ptr = DONE_SC;
+ struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
/* turn led off, when no commands are in the driver */
@@ -2247,13 +2252,13 @@ static void parerr_run(struct Scsi_Host *shpnt)
*/
static void rsti_run(struct Scsi_Host *shpnt)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n");
ptr=DISCONNECTED_SC;
while(ptr) {
- Scsi_Cmnd *next = SCNEXT(ptr);
+ struct scsi_cmnd *next = SCNEXT(ptr);
if (!ptr->device->soft_reset) {
remove_SC(&DISCONNECTED_SC, ptr);
@@ -2438,7 +2443,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
/*
* Show the command data of a command
*/
-static void show_command(Scsi_Cmnd *ptr)
+static void show_command(struct scsi_cmnd *ptr)
{
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
@@ -2462,7 +2467,7 @@ static void show_command(Scsi_Cmnd *ptr)
*/
static void show_queues(struct Scsi_Host *shpnt)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
DO_LOCK(flags);
@@ -2484,7 +2489,7 @@ static void show_queues(struct Scsi_Host *shpnt)
disp_enintr(shpnt);
}
-static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
+static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
int i;
@@ -2813,7 +2818,7 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
{
int i;
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
seq_puts(m, AHA152X_REVID "\n");
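
The aha152x changes above, and the aha1740 and gdth changes further down, are mechanical: they spell out struct scsi_cmnd instead of the old Scsi_Cmnd alias, which was only a typedef of the same structure, so the conversion is a pure rename with no behavioural change. For illustration (the real definitions are in include/scsi/scsi_cmnd.h):

struct scsi_cmnd;                       /* the spelling the hunks switch to */
typedef struct scsi_cmnd Scsi_Cmnd;     /* the legacy alias being retired   */

/* Both declarations name the same type, so either spelling works here. */
void old_style(Scsi_Cmnd *cmd);
void new_style(struct scsi_cmnd *cmd);
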
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index b48d5436f094..786bf7f32c64 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -207,11 +207,11 @@ static int aha1740_test_port(unsigned int base)
static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
{
struct Scsi_Host *host = (struct Scsi_Host *) dev_id;
- void (*my_done)(Scsi_Cmnd *);
+ void (*my_done)(struct scsi_cmnd *);
int errstatus, adapstat;
int number_serviced;
struct ecb *ecbptr;
- Scsi_Cmnd *SCtmp;
+ struct scsi_cmnd *SCtmp;
unsigned int base;
unsigned long flags;
int handled = 0;
@@ -311,7 +311,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
@@ -520,7 +521,7 @@ static int aha1740_biosparam(struct scsi_device *sdev,
return 0;
}
-static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
+static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy)
{
/*
* From Alan Cox :
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h
index dfdaa4d3ea4e..6eeed6da0b54 100644
--- a/drivers/scsi/aha1740.h
+++ b/drivers/scsi/aha1740.h
@@ -135,8 +135,8 @@ struct ecb { /* Enhanced Control Block 6.1 */
/* Hardware defined portion ends here, rest is driver defined */
u8 sense[MAX_SENSE]; /* Sense area */
u8 status[MAX_STATUS]; /* Status area */
- Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
- void (*done) (Scsi_Cmnd *); /* Completion Function */
+ struct scsi_cmnd *SCpnt; /* Link to the SCSI Command Block */
+ void (*done) (struct scsi_cmnd *); /* Completion Function */
};
#define AHA1740CMD_NOP 0x00 /* No OP */
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 80e5b283fd81..1391e5f35918 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
aic94xx_transport_template =
sas_domain_attach_transport(&aic94xx_transport_functions);
- if (!aic94xx_transport_template)
+ if (!aic94xx_transport_template) {
+ err = -ENOMEM;
goto out_destroy_caches;
+ }
err = pci_register_driver(&aic94xx_pci_driver);
if (err)
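
The aic94xx hunk above fixes a classic error-path bug: when sas_domain_attach_transport() fails, the code jumped to the cleanup label without setting err, so the function could report success or a stale value to its caller. A small standalone sketch of the pattern and the fix (names here are illustrative, not the driver's):

#include <stdio.h>
#include <errno.h>

static void *attach_transport(void) { return NULL; }   /* simulate failure */

static int init_driver(void)
{
        int err = 0;
        void *tmpl;

        tmpl = attach_transport();
        if (!tmpl) {
                err = -ENOMEM;          /* the line the patch adds */
                goto out_destroy_caches;
        }

        /* ... register PCI driver, etc. ... */
        return 0;

out_destroy_caches:
        /* ... undo earlier allocations ... */
        return err;                     /* without the assignment this stays 0 */
}

int main(void)
{
        printf("init_driver() = %d\n", init_driver());
        return 0;
}
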
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 2e51ccc510e8..9c397a2794d6 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.40.00.05-20180309"
+#define ARCMSR_DRIVER_VERSION "v1.40.00.09-20180709"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 732b5d9242f1..12316ef4c893 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1061,6 +1061,13 @@ static int arcmsr_resume(struct pci_dev *pdev)
pci_set_master(pdev);
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_E) {
+ writel(0, &acb->pmuE->host_int_status);
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ acb->doneq_index = 0;
+ }
arcmsr_iop_init(acb);
arcmsr_init_get_devmap_timer(acb);
if (set_date_time)
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index b46997cf77e2..8996d2329e11 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1055,7 +1055,7 @@ static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on)
udelay(2); /* 2 deskew delay(45ns*2=90ns) */
val &= 0x007f; /* no bsy */
atp_writew_io(dev, 0, 0x1c, val);
- mdelay(128);
+ msleep(128);
val &= 0x00fb; /* after 1ms no msg */
atp_writew_io(dev, 0, 0x1c, val);
while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0)
@@ -1286,9 +1286,9 @@ static void atp870_init(struct Scsi_Host *shpnt)
k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10;
atp_writeb_base(atpdev, 0x3a, k);
atp_writeb_base(atpdev, 0x3a, k & 0xdf);
- mdelay(32);
+ msleep(32);
atp_writeb_base(atpdev, 0x3a, k);
- mdelay(32);
+ msleep(32);
atp_set_host_id(atpdev, 0, host_id);
tscam(shpnt, wide_chip, scam_on);
@@ -1370,9 +1370,9 @@ static void atp880_init(struct Scsi_Host *shpnt)
k = atp_readb_base(atpdev, 0x38) & 0x80;
atp_writeb_base(atpdev, 0x38, k);
atp_writeb_base(atpdev, 0x3b, 0x20);
- mdelay(32);
+ msleep(32);
atp_writeb_base(atpdev, 0x3b, 0);
- mdelay(32);
+ msleep(32);
atp_readb_io(atpdev, 0, 0x1b);
atp_readb_io(atpdev, 0, 0x17);
@@ -1454,10 +1454,10 @@ static void atp885_init(struct Scsi_Host *shpnt)
atp_writeb_base(atpdev, 0x28, k);
atp_writeb_pci(atpdev, 0, 1, 0x80);
atp_writeb_pci(atpdev, 1, 1, 0x80);
- mdelay(100);
+ msleep(100);
atp_writeb_pci(atpdev, 0, 1, 0);
atp_writeb_pci(atpdev, 1, 1, 0);
- mdelay(1000);
+ msleep(1000);
atp_readb_io(atpdev, 0, 0x1b);
atp_readb_io(atpdev, 0, 0x17);
atp_readb_io(atpdev, 1, 0x1b);
@@ -1473,7 +1473,7 @@ static void atp885_init(struct Scsi_Host *shpnt)
k = (k & 0x07) | 0x40;
atp_set_host_id(atpdev, 1, k);
- mdelay(600); /* this delay used to be called tscam_885() */
+ msleep(600); /* this delay used to be called tscam_885() */
dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n");
atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7);
atp_writeb_io(atpdev, 0, 0x16, 0x80);
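
The atp870u conversions above replace mdelay(), which busy-waits with the CPU spinning, by msleep(), which lets the caller sleep; for delays of tens to hundreds of milliseconds in process context that avoids wasting a CPU for the whole interval. A rough userspace analogy contrasting the two (not kernel code):

#include <stdio.h>
#include <time.h>

/* busy-wait roughly like mdelay(): burns CPU for the whole interval */
static void busy_delay_ms(long ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000L +
                 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
}

/* sleep roughly like msleep(): yields the CPU for the interval */
static void sleep_ms(long ms)
{
        struct timespec ts = { .tv_sec = ms / 1000,
                               .tv_nsec = (ms % 1000) * 1000000L };
        nanosleep(&ts, NULL);
}

int main(void)
{
        busy_delay_ms(100);
        sleep_ms(100);
        printf("both delays elapsed; only the first kept a CPU busy\n");
        return 0;
}
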
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 2eb66df3e3d6..c10aac4dbc5e 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1545,7 +1545,7 @@ int beiscsi_set_host_data(struct beiscsi_hba *phba)
snprintf((char *)ioctl->param.req.param_data,
sizeof(ioctl->param.req.param_data),
"Linux iSCSI v%s", BUILD_STR);
- ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4);
+ ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
ret = be_mbox_notify(ctrl);
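
The be_cmds.c hunk above is an off-by-one fix: the length of the driver-version string written by snprintf() just above needs its terminating NUL accounted for before being rounded up to a 4-byte boundary, otherwise a length that is already a multiple of four leaves no room for the terminator. An illustration with a hypothetical version string, not the driver's actual data:

#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        const char *ver = "Linux iSCSI v1.0";  /* hypothetical, strlen == 16 */
        size_t len = strlen(ver);

        printf("len=%zu  ALIGN(len, 4)=%zu  ALIGN(len + 1, 4)=%zu\n",
               len, (size_t)ALIGN(len, 4), (size_t)ALIGN(len + 1, 4));
        /* 16 vs 20: only the second leaves room for the trailing NUL */
        return 0;
}
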
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a398c54139aa..c8f0a2144b44 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,11 +1,14 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 818d185d63f0..3660059784f7 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,11 +1,22 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful. ALL EXPRESS
+ * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+ * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
+ * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 66ca967f2850..8fdc07b6c686 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,11 +1,22 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful. ALL EXPRESS
+ * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+ * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
+ * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c05d6e91e4bd..c4a33317d344 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -70,21 +70,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
host_status = DID_ERROR;
}
}
- cmnd->result = ScsiResult(host_status, scsi_status);
+ cmnd->result = host_status << 16 | scsi_status;
break;
case BFI_IOIM_STS_TIMEDOUT:
- host_status = DID_TIME_OUT;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_TIME_OUT << 16;
break;
case BFI_IOIM_STS_PATHTOV:
- host_status = DID_TRANSPORT_DISRUPTED;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
default:
- host_status = DID_ERROR;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_ERROR << 16;
}
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
@@ -117,7 +114,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
struct bfad_itnim_data_s *itnim_data;
struct bfad_itnim_s *itnim;
- cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);
+ cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
@@ -144,7 +141,7 @@ bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
struct bfad_s *bfad = drv;
- cmnd->result = ScsiResult(DID_ERROR, 0);
+ cmnd->result = DID_ERROR << 16;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
@@ -1253,14 +1250,14 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
printk(KERN_WARNING
"bfad%d, queuecommand %p %x failed, BFA stopped\n",
bfad->inst_no, cmnd, cmnd->cmnd[0]);
- cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+ cmnd->result = DID_NO_CONNECT << 16;
goto out_fail_cmd;
}
itnim = itnim_data->itnim;
if (!itnim) {
- cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
+ cmnd->result = DID_IMM_RETRY << 16;
goto out_fail_cmd;
}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index af66275570c3..e61ed8dad0b4 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -44,7 +44,6 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa);
#define MAX_FCP_LUN 16384
#define BFAD_TARGET_RESET_TMO 60
#define BFAD_LUN_RESET_TMO 60
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
/*
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 8f03a869ac98..e9e669a6c2bc 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2727,6 +2727,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
goto arm_cq;
}
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index c535c52e72e5..1c5051b1c125 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -199,7 +199,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
- if (driver_byte(result) & DRIVER_SENSE) {
+ if (driver_byte(result) == DRIVER_SENSE) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
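
The ch.c hunk above, like the dc395x.c one further down, tightens a flag test into an equality test: driver_byte() already extracts a single byte from the packed result word, so comparing that byte for equality reads more directly, and for the common case where only DRIVER_SENSE is set the two forms agree. A standalone sketch, with the byte position and the 0x08 value assumed from that era's include/scsi/scsi.h:

#include <stdio.h>

#define DRIVER_SENSE  0x08                              /* assumed value     */
#define driver_byte(result)  (((result) >> 24) & 0xff)  /* assumed layout    */

int main(void)
{
        unsigned int result = DRIVER_SENSE << 24;       /* sense data valid  */

        /* bit test vs. equality test on the extracted byte */
        printf("& DRIVER_SENSE : %d\n", (driver_byte(result) & DRIVER_SENSE) != 0);
        printf("== DRIVER_SENSE: %d\n", driver_byte(result) == DRIVER_SENSE);
        return 0;
}
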
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index a10cf25ee7f9..23d07e9f87d0 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -761,27 +761,116 @@ out:
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
+ /* Table for non-Numonix supported flash parts. Numonix parts are left
+ * to the preexisting code. All flash parts have 64KB sectors.
+ */
+ static struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+ } supported_flash[] = {
+ { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
+ u32 part, manufacturer;
+ u32 density, size = 0;
+ u32 flashid = 0;
int ret;
- uint32_t info = 0;
ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid);
csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
- if (ret != 0)
+ if (ret)
return ret;
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
- return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- hw->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- hw->params.sf_nsec = 64;
- else
- return -EINVAL;
- hw->params.sf_size = 1 << info;
+ /* Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ hw->params.sf_size = supported_flash[part].size_mb;
+ hw->params.sf_nsec =
+ hw->params.sf_size / SF_SEC_SIZE;
+ goto found;
+ }
+
+ /* Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /* This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14 ... 0x19: /* 1MB - 32MB */
+ size = 1 << density;
+ break;
+ case 0x20: /* 64MB */
+ size = 1 << 26;
+ break;
+ case 0x21: /* 128MB */
+ size = 1 << 27;
+ break;
+ case 0x22: /* 256MB */
+ size = 1 << 28;
+ }
+ break;
+ }
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /* This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16: /* 32 MB */
+ size = 1 << 25;
+ break;
+ case 0x17: /* 64MB */
+ size = 1 << 26;
+ }
+ break;
+ }
+ case 0xc2: /* Macronix */
+ case 0xef: /* Winbond */ {
+ /* This Density -> Size decoding table is taken from
+ * Macronix and Winbond Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17: /* 8MB */
+ case 0x18: /* 16MB */
+ size = 1 << density;
+ }
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /* Store decoded Flash size */
+ hw->params.sf_size = size;
+ hw->params.sf_nsec = size / SF_SEC_SIZE;
+found:
+ if (hw->params.sf_size < FLASH_MIN_SIZE)
+ csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, hw->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
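
The rewritten csio_hw_get_flash_params() above keys everything off the 3-byte JEDEC ID: the manufacturer sits in the low byte and, for the vendors handled here, the density code in bits 23:16 encodes the part size; for Micron/Numonix it is simply log2 of the size in bytes over the 1MB to 32MB range. A compact standalone decoder along the same lines, trimmed to that one vendor for brevity:

#include <stdio.h>
#include <stdint.h>

/* Decode a Read-ID result the way the hunk above does for Micron/Numonix:
 * low byte = manufacturer, bits 23:16 = density code = log2(size in bytes). */
static uint32_t flash_size(uint32_t flashid)
{
        uint32_t manufacturer = flashid & 0xff;
        uint32_t density = (flashid >> 16) & 0xff;

        if (manufacturer != 0x20)                /* Micron/Numonix only here */
                return 0;
        if (density >= 0x14 && density <= 0x19)  /* 1MB .. 32MB */
                return 1u << density;
        return 0;                                /* unknown: caller assumes 4MB */
}

int main(void)
{
        uint32_t id = 0x00170020;    /* hypothetical: density 0x17, mfr 0x20 */

        printf("flashid %#x -> %u bytes\n", id, flash_size(id));
        return 0;
}
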
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index faa357b62c61..5022e82ccc4f 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -39,6 +39,7 @@
#include <asm/page.h>
#include <linux/cache.h>
+#include "t4_values.h"
#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
@@ -1309,8 +1310,11 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
struct csio_sge *sge = &wrm->sge;
uint32_t clsz = L1_CACHE_BYTES;
uint32_t s_hps = PAGE_SHIFT - 10;
- uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64;
+ u32 fl_align = clsz < 32 ? 32 : clsz;
+ u32 pack_align;
+ u32 ingpad, ingpack;
+ int pcie_cap;
csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
@@ -1318,14 +1322,82 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
SGE_HOST_PAGE_SIZE_A);
- sge->csio_fl_align = clsz < 32 ? 32 : clsz;
- ingpad = ilog2(sge->csio_fl_align) - 5;
+ /* T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
+ */
+ pack_align = fl_align;
+ pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ u32 mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ pci_read_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
+
+ /* T5/T6 have a special interpretation of the "0"
+ * value for the Packing Boundary. This corresponds to 16
+ * bytes instead of the expected 32 bytes.
+ */
+ if (pack_align <= 16) {
+ ingpack = INGPACKBOUNDARY_16B_X;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = INGPACKBOUNDARY_64B_X;
+ fl_align = 64;
+ } else {
+ u32 pack_align_log = fls(pack_align) - 1;
+
+ ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
+ fl_align = pack_align;
+ }
+
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ ingpad = INGPADBOUNDARY_32B_X;
+ else
+ ingpad = T6_INGPADBOUNDARY_8B_X;
csio_set_reg_field(hw, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY_V(ingpad) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
+ csio_set_reg_field(hw, SGE_CONTROL2_A,
+ INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+ INGPACKBOUNDARY_V(ingpack));
/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
@@ -1337,14 +1409,16 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ fl_align - 1) & ~(fl_align - 1),
SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ fl_align - 1) & ~(fl_align - 1),
SGE_FL_BUFFER_SIZE3_A);
}
+ sge->csio_fl_align = fl_align;
+
csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
/* default value of rx_dma_offset of the NIC driver */
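
Part of the csio_wr.c rework above sizes the packing boundary from the PCIe Maximum Payload Size, which the Device Control register encodes in bits 7:5 as a power of two starting at 128 bytes. A standalone sketch of that decode; the register value used here is made up:

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_DEVCTL_PAYLOAD  0x00e0  /* bits 7:5, as in the hunk */

static unsigned int mps_from_devctl(uint16_t devctl)
{
        unsigned int mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;

        return 1u << mps_log;           /* 0 -> 128, 1 -> 256, ... 5 -> 4096 */
}

int main(void)
{
        uint16_t devctl = 0x2 << 5;     /* hypothetical encoding: 2 -> 512 bytes */

        printf("devctl=%#x -> MPS=%u bytes\n", devctl, mps_from_devctl(devctl));
        return 0;
}
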
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index a43d44e7e7dd..37b8dc60f5f6 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -1141,7 +1141,7 @@ static int afu_release(struct inode *inode, struct file *file)
*
* Return: 0 on success, -errno on failure
*/
-static int ocxlflash_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxlflash_context *ctx = vma->vm_file->private_data;
@@ -1164,8 +1164,7 @@ static int ocxlflash_mmap_fault(struct vm_fault *vmf)
mmio_area = ctx->psn_phys;
mmio_area += offset;
- vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
- return VM_FAULT_NOPAGE;
+ return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}
static const struct vm_operations_struct ocxlflash_vmops = {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 379890c4500b..acac6152f50b 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1104,7 +1104,7 @@ out:
*
* Return: 0 on success, VM_FAULT_SIGBUS on failure
*/
-static int cxlflash_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
@@ -1115,7 +1115,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int rc = 0;
+ vm_fault_t rc = 0;
int ctxid;
ctxid = cfg->ops->process_element(ctx);
@@ -1155,7 +1155,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
out:
if (likely(ctxi))
put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
return rc;
err:
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 60ef8df42b95..1ed2cd82129d 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3473,9 +3473,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/*if( srb->cmd->cmnd[0] == INQUIRY && */
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
- if ((cmd->result == (DID_OK << 16)
- || status_byte(cmd->result) &
- CHECK_CONDITION)) {
+ if ((cmd->result == (DID_OK << 16) ||
+ status_byte(cmd->result) == CHECK_CONDITION)) {
if (!dcb->init_tcq_flag) {
add_dev(acb, dcb, ptr);
dcb->init_tcq_flag = 1;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index ffec695e0bfb..54da3166da8d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2175,15 +2175,13 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
- rcu_read_lock();
+ mutex_lock(&lport->disc.disc_mutex);
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
- rcu_read_unlock();
- mutex_lock(&lport->disc.disc_mutex);
lport->disc.disc_callback = NULL;
mutex_unlock(&lport->disc.disc_mutex);
}
@@ -2712,7 +2710,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
unsigned long deadline;
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
- rcu_read_lock();
+ mutex_lock(&lport->disc.disc_mutex);
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
@@ -2733,7 +2731,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
next_time = deadline;
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
+ mutex_unlock(&lport->disc.disc_mutex);
return next_time;
}
@@ -3080,8 +3078,6 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
mutex_lock(&disc->disc_mutex);
callback = disc->pending ? disc->disc_callback : NULL;
disc->pending = 0;
- mutex_unlock(&disc->disc_mutex);
- rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
@@ -3090,7 +3086,7 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
fc_rport_login(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
+ mutex_unlock(&disc->disc_mutex);
if (callback)
callback(lport, DISC_EV_SUCCESS);
}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 85604795d8ee..16709735b546 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -146,14 +146,14 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id);
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex);
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- Scsi_Cmnd *scp);
+ struct scsi_cmnd *scp);
static int gdth_async_event(gdth_ha_str *ha);
static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
+static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
-static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
+static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
u16 idx, gdth_evt_data *evt);
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
@@ -161,10 +161,11 @@ static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
gdth_evt_str *estr);
static void gdth_clear_events(void);
-static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count);
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ u16 hdrive);
static void gdth_enable_int(gdth_ha_str *ha);
static int gdth_test_busy(gdth_ha_str *ha);
@@ -446,7 +447,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
int timeout, u32 *info)
{
gdth_ha_str *ha = shost_priv(sdev->host);
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
struct gdth_cmndinfo cmndinfo;
DECLARE_COMPLETION_ONSTACK(wait);
int rval;
@@ -1982,11 +1983,11 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
/* command queueing/sending functions */
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
+static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
{
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- register Scsi_Cmnd *pscp;
- register Scsi_Cmnd *nscp;
+ register struct scsi_cmnd *pscp;
+ register struct scsi_cmnd *nscp;
unsigned long flags;
TRACE(("gdth_putq() priority %d\n",priority));
@@ -2000,11 +2001,11 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
scp->SCp.ptr = NULL;
} else { /* queue not empty */
pscp = ha->req_first;
- nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
/* priority: 0-highest,..,0xff-lowest */
while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
pscp = nscp;
- nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
}
pscp->SCp.ptr = (char *)scp;
scp->SCp.ptr = (char *)nscp;
@@ -2013,7 +2014,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
#ifdef GDTH_STATISTICS
flags = 0;
- for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++flags;
if (max_rq < flags) {
max_rq = flags;
@@ -2024,8 +2025,8 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
static void gdth_next(gdth_ha_str *ha)
{
- register Scsi_Cmnd *pscp;
- register Scsi_Cmnd *nscp;
+ register struct scsi_cmnd *pscp;
+ register struct scsi_cmnd *nscp;
u8 b, t, l, firsttime;
u8 this_cmd, next_cmd;
unsigned long flags = 0;
@@ -2040,10 +2041,10 @@ static void gdth_next(gdth_ha_str *ha)
next_cmd = gdth_polling ? FALSE:TRUE;
cmd_index = 0;
- for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
+ for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
- if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
- pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
+ pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
if (!nscp_cmndinfo->internal_command) {
b = nscp->device->channel;
t = nscp->device->id;
@@ -2250,7 +2251,7 @@ static void gdth_next(gdth_ha_str *ha)
if (!this_cmd)
break;
if (nscp == ha->req_first)
- ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
+ ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
else
pscp->SCp.ptr = nscp->SCp.ptr;
if (!next_cmd)
@@ -2275,7 +2276,7 @@ static void gdth_next(gdth_ha_str *ha)
* gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
* buffers, kmap_atomic() as needed.
*/
-static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count)
{
u16 cpcount,i, max_sg = scsi_sg_count(scp);
@@ -2317,7 +2318,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
}
}
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
u8 t;
gdth_inq_data inq;
@@ -2419,7 +2420,8 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
return 0;
}
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ u16 hdrive)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
@@ -2594,7 +2596,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
return cmd_index;
}
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
{
register gdth_cmd_str *cmdp;
u16 i;
@@ -2767,7 +2769,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
return cmd_index;
}
-static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
@@ -2958,7 +2960,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
gdt6_dpram_str __iomem *dp6_ptr;
gdt2_dpram_str __iomem *dp2_ptr;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
int rval, i;
u8 IStatus;
u16 Service;
@@ -3217,7 +3219,7 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id)
}
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- Scsi_Cmnd *scp)
+ struct scsi_cmnd *scp)
{
gdth_msg_str *msg;
gdth_cmd_str *cmdp;
@@ -3708,7 +3710,7 @@ static u8 gdth_timer_running;
static void gdth_timeout(struct timer_list *unused)
{
u32 i;
- Scsi_Cmnd *nscp;
+ struct scsi_cmnd *nscp;
gdth_ha_str *ha;
unsigned long flags;
@@ -3724,7 +3726,8 @@ static void gdth_timeout(struct timer_list *unused)
if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
++act_stats;
- for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ for (act_rq=0,
+ nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++act_rq;
TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
@@ -3909,12 +3912,12 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
}
-static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
+static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
int i;
unsigned long flags;
- Scsi_Cmnd *cmnd;
+ struct scsi_cmnd *cmnd;
u8 b;
TRACE2(("gdth_eh_bus_reset()\n"));
@@ -4465,7 +4468,7 @@ free_fail:
static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
gdth_ha_str *ha;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
unsigned long flags;
char cmnd[MAX_COMMAND_SIZE];
void __user *argp = (void __user *)arg;
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index e6e5ccb1e0f3..ee6ffcf388e8 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -162,9 +162,9 @@
#define BIGSECS 63 /* mapping 255*63 */
/* special command ptr. */
-#define UNUSED_CMND ((Scsi_Cmnd *)-1)
-#define INTERNAL_CMND ((Scsi_Cmnd *)-2)
-#define SCREEN_CMND ((Scsi_Cmnd *)-3)
+#define UNUSED_CMND ((struct scsi_cmnd *)-1)
+#define INTERNAL_CMND ((struct scsi_cmnd *)-2)
+#define SCREEN_CMND ((struct scsi_cmnd *)-3)
#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
/* controller services */
@@ -867,7 +867,7 @@ typedef struct {
u16 service; /* service/firmware ver./.. */
u32 info;
u32 info2; /* additional info */
- Scsi_Cmnd *req_first; /* top of request queue */
+ struct scsi_cmnd *req_first; /* top of request queue */
struct {
u8 present; /* Flag: host drive present? */
u8 is_logdrv; /* Flag: log. drive (master)? */
@@ -896,7 +896,7 @@ typedef struct {
u32 id_list[MAXID]; /* IDs of the phys. devices */
} raw[MAXBUS]; /* SCSI channels */
struct {
- Scsi_Cmnd *cmnd; /* pending request */
+ struct scsi_cmnd *cmnd; /* pending request */
u16 service; /* service */
} cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
struct gdth_cmndinfo { /* per-command private info */
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 20add49cdd32..3a9751a80225 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -626,7 +626,7 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
{
unsigned long flags;
int i;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
struct gdth_cmndinfo *cmndinfo;
u8 b, t;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 7052a5d45f7f..6c7d2e201abe 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/lcm.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -199,17 +200,17 @@ struct hisi_sas_slot {
int dlvry_queue_slot;
int cmplt_queue;
int cmplt_queue_slot;
- int idx;
int abort;
int ready;
- void *buf;
- dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
- struct work_struct abort_slot;
struct timer_list internal_abort_timer;
bool is_internal;
struct hisi_sas_tmf_task *tmf;
+ /* Do not reorder/change members after here */
+ void *buf;
+ dma_addr_t buf_dma;
+ int idx;
};
struct hisi_sas_hw {
@@ -277,6 +278,7 @@ struct hisi_hba {
int n_phy;
spinlock_t lock;
+ struct semaphore sem;
struct timer_list timer;
struct workqueue_struct *wq;
@@ -298,7 +300,6 @@ struct hisi_hba {
int queue_count;
- struct dma_pool *buffer_pool;
struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES];
struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES];
dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
@@ -319,6 +320,7 @@ struct hisi_hba {
const struct hisi_sas_hw *hw; /* Low level hw interface */
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
+ u32 phy_state;
};
/* Generic HW DMA host memory structures */
@@ -479,4 +481,6 @@ extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
+extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
+extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
#endif
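
The struct hisi_sas_slot reshuffle above moves the fields that must survive slot reuse (buf, buf_dma, idx) behind a "do not reorder" marker, so the hisi_sas_main.c change below can clear everything in front of them with a single memset() up to offsetof(). A toy illustration of that idiom; the struct layout here is made up, not the driver's:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct slot {
        int dlvry_queue;        /* per-use state, wiped on free     */
        int n_elem;
        /* Do not reorder/change members after here */
        void *buf;              /* long-lived, preserved across use */
        int idx;
};

int main(void)
{
        struct slot s = { .dlvry_queue = 3, .n_elem = 2, .buf = &s, .idx = 7 };

        memset(&s, 0, offsetof(struct slot, buf));   /* same idiom as the hunk */
        printf("dlvry_queue=%d n_elem=%d idx=%d buf=%p\n",
               s.dlvry_queue, s.n_elem, s.idx, s.buf);
        return 0;
}
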
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 6f562974f8f6..a4e2e6aa9a6b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -242,20 +242,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->data_dir);
}
- if (slot->buf)
- dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
spin_lock_irqsave(&dq->lock, flags);
list_del_init(&slot->entry);
spin_unlock_irqrestore(&dq->lock, flags);
- slot->buf = NULL;
- slot->task = NULL;
- slot->port = NULL;
+
+ memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
+
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot->idx);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
-
- /* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -285,40 +281,6 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
device_id, abort_flag, tag_to_abort);
}
-/*
- * This function will issue an abort TMF regardless of whether the
- * task is in the sdev or not. Then it will do the task complete
- * cleanup and callbacks.
- */
-static void hisi_sas_slot_abort(struct work_struct *work)
-{
- struct hisi_sas_slot *abort_slot =
- container_of(work, struct hisi_sas_slot, abort_slot);
- struct sas_task *task = abort_slot->task;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct scsi_cmnd *cmnd = task->uldd_task;
- struct hisi_sas_tmf_task tmf_task;
- struct scsi_lun lun;
- struct device *dev = hisi_hba->dev;
- int tag = abort_slot->idx;
-
- if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
- dev_err(dev, "cannot abort slot for non-ssp task\n");
- goto out;
- }
-
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
-out:
- /* Do cleanup for this task */
- hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
- if (task->task_done)
- task->task_done(task);
-}
-
static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq **dq_pointer,
bool is_tmf, struct hisi_sas_tmf_task *tmf,
@@ -334,8 +296,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
- unsigned long flags, flags_dq;
struct hisi_sas_dq *dq;
+ unsigned long flags;
int wr_q_index;
if (!sas_port) {
@@ -430,30 +392,22 @@ static int hisi_sas_task_prep(struct sas_task *task,
goto err_out_dma_unmap;
slot = &hisi_hba->slot_info[slot_idx];
- memset(slot, 0, sizeof(struct hisi_sas_slot));
-
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
- spin_lock_irqsave(&dq->lock, flags_dq);
+ spin_lock_irqsave(&dq->lock, flags);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ spin_unlock_irqrestore(&dq->lock, flags);
rc = -EAGAIN;
- goto err_out_buf;
+ goto err_out_tag;
}
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&dq->lock, flags);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
- slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -464,7 +418,6 @@ static int hisi_sas_task_prep(struct sas_task *task,
slot->tmf = tmf;
slot->is_internal = is_tmf;
task->lldd_task = slot;
- INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
@@ -488,21 +441,15 @@ static int hisi_sas_task_prep(struct sas_task *task,
break;
}
- spin_lock_irqsave(&dq->lock, flags);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&dq->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
++(*pass);
- slot->ready = 1;
+ WRITE_ONCE(slot->ready, 1);
return 0;
-err_out_buf:
- dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -536,8 +483,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
struct device *dev = hisi_hba->dev;
struct hisi_sas_dq *dq = NULL;
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
- return -EINVAL;
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
+ if (in_softirq())
+ return -EINVAL;
+
+ down(&hisi_hba->sem);
+ up(&hisi_hba->sem);
+ }
/* protect task_prep and start_delivery sequence */
rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
@@ -819,6 +771,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
for (i = 0; i < HISI_PHYES_NUM; i++)
INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
+
+ spin_lock_init(&phy->lock);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -862,7 +816,6 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
hisi_sas_slot_task_free(hisi_hba, task, slot);
}
-/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
struct domain_device *device)
{
@@ -914,7 +867,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
hisi_sas_dereg_device(hisi_hba, device);
+ down(&hisi_hba->sem);
hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ up(&hisi_hba->sem);
device->lldd_dev = NULL;
}
@@ -1351,21 +1306,12 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
}
}
-static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- u32 old_state, state;
- int rc;
-
- if (!hisi_hba->hw->soft_reset)
- return -1;
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- return -1;
-
- dev_info(dev, "controller resetting...\n");
- old_state = hisi_hba->hw->get_phys_state(hisi_hba);
+ down(&hisi_hba->sem);
+ hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
@@ -1374,34 +1320,61 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- rc = hisi_hba->hw->soft_reset(hisi_hba);
- if (rc) {
- dev_warn(dev, "controller reset failed (%d)\n", rc);
- clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- scsi_unblock_requests(shost);
- goto out;
- }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
- clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
+{
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 state;
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
hisi_sas_refresh_port_id(hisi_hba);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
state = hisi_hba->hw->get_phys_state(hisi_hba);
- hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_info(dev, "controller reset complete\n");
+ hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
-out:
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ int rc;
- return rc;
+ if (!hisi_hba->hw->soft_reset)
+ return -1;
+
+ if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ return -1;
+
+ dev_info(dev, "controller resetting...\n");
+ hisi_sas_controller_reset_prepare(hisi_hba);
+
+ rc = hisi_hba->hw->soft_reset(hisi_hba);
+ if (rc) {
+ dev_warn(dev, "controller reset failed (%d)\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ return rc;
+ }
+
+ hisi_sas_controller_reset_done(hisi_hba);
+ dev_info(dev, "controller reset complete\n");
+
+ return 0;
}
static int hisi_sas_abort_task(struct sas_task *task)
@@ -1644,14 +1617,32 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+ int rc, i;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
- if (r.done)
- return TMF_RESP_FUNC_COMPLETE;
+ if (!r.done)
+ return TMF_RESP_FUNC_FAILED;
+
+ for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
- return TMF_RESP_FUNC_FAILED;
+ if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
+ DEV_IS_EXPANDER(device->dev_type))
+ continue;
+
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
+ sas_dev->device_id, rc);
+ }
+
+ hisi_sas_release_tasks(hisi_hba);
+
+ return TMF_RESP_FUNC_COMPLETE;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1723,21 +1714,13 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_unlock_irqrestore(&hisi_hba->lock, flags);
slot = &hisi_hba->slot_info[slot_idx];
- memset(slot, 0, sizeof(struct hisi_sas_slot));
-
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
spin_lock_irqsave(&dq->lock, flags_dq);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
spin_unlock_irqrestore(&dq->lock, flags_dq);
rc = -EAGAIN;
- goto err_out_buf;
+ goto err_out_tag;
}
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags_dq);
@@ -1745,7 +1728,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
- slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -1767,7 +1749,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- slot->ready = 1;
+ WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
@@ -1776,9 +1758,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
return 0;
-err_out_buf:
- dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -1919,7 +1898,8 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
} else {
struct hisi_sas_port *port = phy->port;
- if (phy->in_reset) {
+ if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
+ phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
}
@@ -2014,8 +1994,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
struct device *dev = hisi_hba->dev;
- int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int max_command_entries_ru, sz_slot_buf_ru;
+ int blk_cnt, slots_per_blk;
+ sema_init(&hisi_hba->sem, 1);
spin_lock_init(&hisi_hba->lock);
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_init(hisi_hba, i);
@@ -2045,29 +2028,27 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
/* Delivery queue */
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
- &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
+ hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
+ &hisi_hba->cmd_hdr_dma[i],
+ GFP_KERNEL);
if (!hisi_hba->cmd_hdr[i])
goto err_out;
/* Completion queue */
s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
- &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
+ hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
+ &hisi_hba->complete_hdr_dma[i],
+ GFP_KERNEL);
if (!hisi_hba->complete_hdr[i])
goto err_out;
}
- s = sizeof(struct hisi_sas_slot_buf_table);
- hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
- if (!hisi_hba->buffer_pool)
- goto err_out;
-
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
- GFP_KERNEL);
+ hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
+ GFP_KERNEL);
if (!hisi_hba->itct)
goto err_out;
+ memset(hisi_hba->itct, 0, s);
hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
sizeof(struct hisi_sas_slot),
@@ -2075,15 +2056,45 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->slot_info)
goto err_out;
+ /* roundup to avoid overly large block size */
+ max_command_entries_ru = roundup(max_command_entries, 64);
+ sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
+ s = lcm(max_command_entries_ru, sz_slot_buf_ru);
+ blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
+ slots_per_blk = s / sz_slot_buf_ru;
+ for (i = 0; i < blk_cnt; i++) {
+ struct hisi_sas_slot_buf_table *buf;
+ dma_addr_t buf_dma;
+ int slot_index = i * slots_per_blk;
+
+ buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
+ if (!buf)
+ goto err_out;
+ memset(buf, 0, s);
+
+ for (j = 0; j < slots_per_blk; j++, slot_index++) {
+ struct hisi_sas_slot *slot;
+
+ slot = &hisi_hba->slot_info[slot_index];
+ slot->buf = buf;
+ slot->buf_dma = buf_dma;
+ slot->idx = slot_index;
+
+ buf++;
+ buf_dma += sizeof(*buf);
+ }
+ }
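
	As context for the block carving above: the driver now allocates slot buffers in large DMA-coherent chunks whose size is the least common multiple of the rounded-up command count and the rounded-up per-slot buffer size, then hands out fixed-size slices of each chunk to consecutive slots. The stand-alone sketch below only reproduces that arithmetic; the 4096-command count and the 9216-byte slot-buffer size are made-up example values, not figures taken from the hisi_sas headers.

	#include <stdio.h>

	/* Round x up to the next multiple of a (a must be non-zero). */
	static unsigned int roundup_to(unsigned int x, unsigned int a)
	{
		return ((x + a - 1) / a) * a;
	}

	static unsigned int gcd(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int t = a % b;

			a = b;
			b = t;
		}
		return a;
	}

	static unsigned int lcm(unsigned int a, unsigned int b)
	{
		return a / gcd(a, b) * b;
	}

	int main(void)
	{
		/* Hypothetical figures: 4096 commands, 9216-byte slot buffer. */
		unsigned int max_command_entries = 4096;
		unsigned int sz_slot_buf = 9216;

		unsigned int max_ru = roundup_to(max_command_entries, 64);
		unsigned int sz_ru = roundup_to(sz_slot_buf, 64);
		unsigned int s = lcm(max_ru, sz_ru);
		unsigned int blk_cnt = (max_ru * sz_ru) / s;
		unsigned int slots_per_blk = s / sz_ru;

		/* With the example values: 36864-byte blocks, 1024 blocks, 4 slots each. */
		printf("block size %u, %u blocks, %u slots per block\n",
		       s, blk_cnt, slots_per_blk);
		return 0;
	}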
+
s = max_command_entries * sizeof(struct hisi_sas_iost);
- hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
- GFP_KERNEL);
+ hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
+ GFP_KERNEL);
if (!hisi_hba->iost)
goto err_out;
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
- hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
- &hisi_hba->breakpoint_dma, GFP_KERNEL);
+ hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
+ &hisi_hba->breakpoint_dma,
+ GFP_KERNEL);
if (!hisi_hba->breakpoint)
goto err_out;
@@ -2094,14 +2105,16 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
- hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
- &hisi_hba->initial_fis_dma, GFP_KERNEL);
+ hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
+ &hisi_hba->initial_fis_dma,
+ GFP_KERNEL);
if (!hisi_hba->initial_fis)
goto err_out;
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
- hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
- &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
+ hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
+ &hisi_hba->sata_breakpoint_dma,
+ GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
goto err_out;
hisi_sas_init_mem(hisi_hba);
@@ -2122,54 +2135,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_alloc);
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
- int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
-
- for (i = 0; i < hisi_hba->queue_count; i++) {
- s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- if (hisi_hba->cmd_hdr[i])
- dma_free_coherent(dev, s,
- hisi_hba->cmd_hdr[i],
- hisi_hba->cmd_hdr_dma[i]);
-
- s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- if (hisi_hba->complete_hdr[i])
- dma_free_coherent(dev, s,
- hisi_hba->complete_hdr[i],
- hisi_hba->complete_hdr_dma[i]);
- }
-
- dma_pool_destroy(hisi_hba->buffer_pool);
-
- s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- if (hisi_hba->itct)
- dma_free_coherent(dev, s,
- hisi_hba->itct, hisi_hba->itct_dma);
-
- s = max_command_entries * sizeof(struct hisi_sas_iost);
- if (hisi_hba->iost)
- dma_free_coherent(dev, s,
- hisi_hba->iost, hisi_hba->iost_dma);
-
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
- if (hisi_hba->breakpoint)
- dma_free_coherent(dev, s,
- hisi_hba->breakpoint,
- hisi_hba->breakpoint_dma);
-
-
- s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
- if (hisi_hba->initial_fis)
- dma_free_coherent(dev, s,
- hisi_hba->initial_fis,
- hisi_hba->initial_fis_dma);
-
- s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
- if (hisi_hba->sata_breakpoint)
- dma_free_coherent(dev, s,
- hisi_hba->sata_breakpoint,
- hisi_hba->sata_breakpoint_dma);
-
if (hisi_hba->wq)
destroy_workqueue(hisi_hba->wq);
}
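
	A note on the new hisi_hba->sem usage in the main-driver changes above: the reset path takes the semaphore in hisi_sas_controller_reset_prepare() and releases it in hisi_sas_controller_reset_done(), while submitters that see HISI_SAS_REJECT_CMD_BIT set (and are allowed to sleep) simply do a down()/up() pair so they block until the reset window closes. The following is a rough userspace analogue of that gate, written with POSIX semaphores purely for illustration; the thread layout, the sleep and the message strings are invented, not taken from the driver.

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static sem_t gate;            /* held for the whole reset window     */
	static atomic_int rejecting;  /* mirrors HISI_SAS_REJECT_CMD_BIT     */

	static void *reset_thread(void *arg)
	{
		(void)arg;

		sem_wait(&gate);                  /* reset_prepare: close the gate */
		atomic_store(&rejecting, 1);
		printf("reset: in progress\n");
		sleep(1);                         /* stands in for soft_reset()    */
		atomic_store(&rejecting, 0);
		printf("reset: done\n");
		sem_post(&gate);                  /* reset_done: reopen the gate   */
		return NULL;
	}

	static void submit_command(int id)
	{
		if (atomic_load(&rejecting)) {
			/* A submitter that may sleep just waits out the reset. */
			sem_wait(&gate);
			sem_post(&gate);
		}
		printf("cmd %d submitted\n", id);
	}

	int main(void)
	{
		pthread_t t;

		sem_init(&gate, 0, 1);
		pthread_create(&t, NULL, reset_thread, NULL);
		usleep(100 * 1000);               /* let the reset start first     */
		submit_command(1);                /* blocks until the reset ends   */
		pthread_join(t, NULL);
		sem_destroy(&gate);
		return 0;
	}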
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 89ab18c1959c..8f60f0e04599 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -903,23 +903,28 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+	 * Ensure that memory for slots built on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
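
	The start_delivery_*() rework above pairs WRITE_ONCE(slot->ready, 1) on the submission side with a read of ->ready plus an smp_rmb() before the hardware write pointer is advanced, so the delivery path never tells the controller about a slot whose command table it might not yet observe. A userspace approximation of that publish/consume ordering, using C11 release/acquire atomics instead of the kernel's primitives, is sketched below; struct slot here is a toy type, not the driver's.

	#include <stdatomic.h>
	#include <stdio.h>

	struct slot {
		int dlvry_queue_slot;          /* payload written before publish  */
		atomic_int ready;              /* plays the role of slot->ready   */
	};

	/* Producer: build the slot, then publish it (WRITE_ONCE + barrier). */
	static void prep_slot(struct slot *s, int idx)
	{
		s->dlvry_queue_slot = idx;
		atomic_store_explicit(&s->ready, 1, memory_order_release);
	}

	/* Consumer: only trust the payload after observing ready (smp_rmb). */
	static int deliver_slot(struct slot *s)
	{
		if (!atomic_load_explicit(&s->ready, memory_order_acquire))
			return -1;
		return s->dlvry_queue_slot;    /* safe: payload is now visible */
	}

	int main(void)
	{
		struct slot s = { .ready = 0 };

		prep_slot(&s, 42);
		printf("write pointer advances to %d\n", deliver_slot(&s) + 1);
		return 0;
	}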
@@ -1296,11 +1301,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
slot_err_v1_hw(hisi_hba, task, slot);
- if (unlikely(slot->abort)) {
- queue_work(hisi_hba->wq, &slot->abort_slot);
- /* immediately return and do not complete */
+ if (unlikely(slot->abort))
return ts->stat;
- }
goto out;
}
@@ -1469,7 +1471,8 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 213c530e63f2..9c5c5a601332 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1665,23 +1665,28 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+	 * Ensure that memory for slots built on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
@@ -2840,7 +2845,8 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if (bcast_status & RX_BCAST_CHG_MSK)
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
@@ -3234,8 +3240,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
if (fis->status & ATA_ERR) {
dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
fis->status);
- disable_phy_v2_hw(hisi_hba, phy_no);
- enable_phy_v2_hw(hisi_hba, phy_no);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
res = IRQ_NONE;
goto end;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 9f1e2d03f914..08b503e274b8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -51,7 +51,6 @@
#define CFG_ABT_SET_IPTT_DONE 0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF 0
#define HGC_IOMB_PROC1_STATUS 0x104
-#define CFG_1US_TIMER_TRSH 0xcc
#define CHNL_INT_STATUS 0x148
#define HGC_AXI_FIFO_ERR_INFO 0x154
#define AXI_ERR_INFO_OFF 0
@@ -121,6 +120,8 @@
#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF 2
#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PHY_CFG_PHY_RST_OFF 3
+#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
@@ -131,6 +132,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF 17
#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -206,6 +210,8 @@
#define AXI_MASTER_CFG_BASE (0x5000)
#define AM_CTRL_GLOBAL (0x0)
+#define AM_CTRL_SHUTDOWN_REQ_OFF 0
+#define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
#define AM_CURR_TRANS_RETURN (0x150)
#define AM_CFG_MAX_TRANS (0x5010)
@@ -425,7 +431,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
(u32)((1ULL << hisi_hba->queue_count) - 1));
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
- hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -486,6 +491,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
+ hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
@@ -758,15 +764,25 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
cfg |= PHY_CFG_ENA_MSK;
+ cfg &= ~PHY_CFG_PHY_RST_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+ u32 state;
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+
+ mdelay(50);
+
+ state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ if (state & BIT(phy_no)) {
+ cfg |= PHY_CFG_PHY_RST_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+ }
}
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -866,23 +882,28 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+	 * Ensure that memory for slots built on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
@@ -1170,6 +1191,16 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
+
+ /* check ERR bit of Status Register */
+ if (fis->status & ATA_ERR) {
+ dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
+ phy_no, fis->status);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ res = IRQ_NONE;
+ goto end;
+ }
+
sas_phy->oob_mode = SATA_OOB_MODE;
attached_sas_addr[0] = 0x50;
attached_sas_addr[7] = phy_no;
@@ -1256,9 +1287,13 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -1327,11 +1362,77 @@ static const struct hisi_sas_hw_error port_axi_error[] = {
},
};
-static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
- struct hisi_hba *hisi_hba = p;
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
+ u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
struct device *dev = hisi_hba->dev;
+ int i;
+
+ irq_value &= ~irq_msk;
+ if (!irq_value)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
+ const struct hisi_sas_hw_error *error = &port_axi_error[i];
+
+ if (!(irq_value & error->irq_msk))
+ continue;
+
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
+}
+
+static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct pci_dev *pci_dev = hisi_hba->pci_dev;
+ struct device *dev = hisi_hba->dev;
+
+ irq_value &= ~irq_msk;
+ if (!irq_value)
+ return;
+
+ if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n", phy_no);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
+ (pci_dev->revision == 0x20)) {
+ u32 reg_value;
+ int rc;
+
+ rc = hisi_sas_read32_poll_timeout_atomic(
+ HILINK_ERR_DFX, reg_value,
+ !((reg_value >> 8) & BIT(phy_no)),
+ 1000, 10000);
+ if (rc)
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
+}
+
+static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
u32 irq_msk;
int phy_no = 0;
@@ -1341,84 +1442,12 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
while (irq_msk) {
u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
- u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1_MSK);
- u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2_MSK);
-
- irq_value1 &= ~irq_msk1;
- irq_value2 &= ~irq_msk2;
-
- if ((irq_msk & (4 << (phy_no * 4))) &&
- irq_value1) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
- const struct hisi_sas_hw_error *error =
- &port_axi_error[i];
-
- if (!(irq_value1 & error->irq_msk))
- continue;
-
- dev_err(dev, "%s error (phy%d 0x%x) found!\n",
- error->msg, phy_no, irq_value1);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
- struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ if (irq_msk & (4 << (phy_no * 4)))
+ handle_chl_int1_v3_hw(hisi_hba, phy_no);
- if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
- dev_warn(dev, "phy%d identify timeout\n",
- phy_no);
- hisi_sas_notify_phy_event(phy,
- HISI_PHYE_LINK_RESET);
-
- }
-
- if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
- u32 reg_value = hisi_sas_phy_read32(hisi_hba,
- phy_no, STP_LINK_TIMEOUT_STATE);
-
- dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
- phy_no, reg_value);
- if (reg_value & BIT(4))
- hisi_sas_notify_phy_event(phy,
- HISI_PHYE_LINK_RESET);
- }
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
-
- if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
- (pci_dev->revision == 0x20)) {
- u32 reg_value;
- int rc;
-
- rc = hisi_sas_read32_poll_timeout_atomic(
- HILINK_ERR_DFX, reg_value,
- !((reg_value >> 8) & BIT(phy_no)),
- 1000, 10000);
- if (rc) {
- disable_phy_v3_hw(hisi_hba, phy_no);
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2,
- BIT(CHL_INT2_RX_INVLD_DW_OFF));
- hisi_sas_phy_read32(hisi_hba, phy_no,
- ERR_CNT_INVLD_DW);
- mdelay(1);
- enable_phy_v3_hw(hisi_hba, phy_no);
- }
- }
- }
+ if (irq_msk & (8 << (phy_no * 4)))
+ handle_chl_int2_v3_hw(hisi_hba, phy_no);
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
hisi_sas_phy_write32(hisi_hba, phy_no,
@@ -1964,11 +1993,11 @@ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
}
-static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ u32 status, reg_val;
int rc;
- u32 status;
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
@@ -1978,14 +2007,32 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
mdelay(10);
- hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
/* wait until bus idle */
rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
AM_CURR_TRANS_RETURN, status,
status == 0x3, 10, 100);
if (rc) {
- dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ rc = disable_host_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
return rc;
}
@@ -2433,6 +2480,41 @@ static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ dev_info(dev, "FLR prepare\n");
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ hisi_sas_controller_reset_prepare(hisi_hba);
+
+ rc = disable_host_v3_hw(hisi_hba);
+ if (rc)
+ dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
+}
+
+static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ hisi_sas_init_mem(hisi_hba);
+
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
+ return;
+ }
+
+ hisi_sas_controller_reset_done(hisi_hba);
+ dev_info(dev, "FLR done\n");
+}
+
enum {
/* instances of the controller */
hip08,
@@ -2444,38 +2526,24 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- u32 device_state, status;
+ u32 device_state;
int rc;
- u32 reg_val;
if (!pdev->pm_cap) {
dev_err(dev, "PCI PM not supported\n");
return -ENODEV;
}
- set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ return -1;
+
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
- /* disable DQ/PHY/bus */
- interrupt_disable_v3_hw(hisi_hba);
- hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
- hisi_sas_kill_tasklets(hisi_hba);
-
- hisi_sas_stop_phys(hisi_hba);
- reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
- AM_CTRL_GLOBAL);
- reg_val |= 0x1;
- hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
- AM_CTRL_GLOBAL, reg_val);
-
- /* wait until bus idle */
- rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
- AM_CURR_TRANS_RETURN, status,
- status == 0x3, 10, 100);
+ rc = disable_host_v3_hw(hisi_hba);
if (rc) {
- dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
@@ -2538,6 +2606,8 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
.error_detected = hisi_sas_error_detected_v3_hw,
.mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
.slot_reset = hisi_sas_slot_reset_v3_hw,
+ .reset_prepare = hisi_sas_reset_prepare_v3_hw,
+ .reset_done = hisi_sas_reset_done_v3_hw,
};
static struct pci_driver sas_v3_pci_driver = {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3771e59a9fae..f02dcc875a09 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -563,6 +563,38 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
+struct scsi_host_mq_in_flight {
+ int cnt;
+};
+
+static void scsi_host_check_in_flight(struct request *rq, void *data,
+ bool reserved)
+{
+ struct scsi_host_mq_in_flight *in_flight = data;
+
+ if (blk_mq_request_started(rq))
+ in_flight->cnt++;
+}
+
+/**
+ * scsi_host_busy - Return the host busy counter
+ * @shost: Pointer to Scsi_Host whose busy count is returned.
+ **/
+int scsi_host_busy(struct Scsi_Host *shost)
+{
+ struct scsi_host_mq_in_flight in_flight = {
+ .cnt = 0,
+ };
+
+ if (!shost->use_blk_mq)
+ return atomic_read(&shost->host_busy);
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
+ &in_flight);
+ return in_flight.cnt;
+}
+EXPORT_SYMBOL(scsi_host_busy);
+
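	The new scsi_host_busy() above hides the difference between the legacy atomic host_busy counter and blk-mq, where the number of outstanding commands has to be derived by iterating the tag set and counting started requests. The stand-alone sketch below models only that counting pattern, with toy stand-ins for struct request and blk_mq_tagset_busy_iter(); none of these types mirror the real block-layer definitions.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins for struct request and blk_mq_request_started(). */
	struct request {
		bool started;
	};

	typedef void (*busy_iter_fn)(struct request *rq, void *data, bool reserved);

	/* Model of blk_mq_tagset_busy_iter(): call fn on every request. */
	static void tagset_busy_iter(struct request *rqs, int nr,
				     busy_iter_fn fn, void *data)
	{
		for (int i = 0; i < nr; i++)
			fn(&rqs[i], data, false);
	}

	struct mq_in_flight {
		int cnt;
	};

	static void check_in_flight(struct request *rq, void *data, bool reserved)
	{
		struct mq_in_flight *in_flight = data;

		(void)reserved;
		if (rq->started)
			in_flight->cnt++;
	}

	int main(void)
	{
		struct request rqs[] = { { true }, { false }, { true }, { true } };
		struct mq_in_flight in_flight = { .cnt = 0 };

		tagset_busy_iter(rqs, 4, check_in_flight, &in_flight);
		printf("%d commands in flight\n", in_flight.cnt);   /* prints 3 */
		return 0;
	}
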
/**
* scsi_host_put - dec a Scsi_Host ref count
* @shost: Pointer to Scsi_Host to dec.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index daefe8172b04..b64ca977825d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1322,7 +1322,7 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
/**
* ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
- * @scmd: Scsi_Cmnd with the scatterlist
+ * @scmd: struct scsi_cmnd with the scatterlist
* @evt: ibmvfc event struct
* @vfc_cmd: vfc_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 17df76f0be3c..9df8a1a2299c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);
@@ -262,7 +262,7 @@ static void gather_partition_info(void)
ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
- strncpy(partition_name, ppartition_name,
+ strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)
@@ -681,7 +681,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
/**
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
- * @cmd: Scsi_Cmnd with the scatterlist
+ * @cmd: struct scsi_cmnd with the scatterlist
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
*
@@ -1274,14 +1274,12 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
if (hostdata->client_migrated)
hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
- strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+ strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
- hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(hostdata->dev);
- strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
- hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+ strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
req->buffer = cpu_to_be64(hostdata->caps_addr);
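
	The strncpy()->strlcpy() conversions above also remove the manual NUL termination, because strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer, whereas strlcpy() always terminates and truncates. A minimal userspace illustration follows; my_strlcpy() is a local fallback with the usual strlcpy semantics, included only because not every libc exports one.

	#include <stdio.h>
	#include <string.h>

	/* Copy at most size-1 bytes and always NUL-terminate the destination. */
	static size_t my_strlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len < size - 1 ? len : size - 1;

			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;               /* length the caller tried to copy */
	}

	int main(void)
	{
		char name[8];

		strncpy(name, "PARTITION-42", sizeof(name));
		/* name is NOT NUL-terminated here; printing it could overrun. */

		my_strlcpy(name, "PARTITION-42", sizeof(name));
		printf("truncated copy: \"%s\"\n", name);   /* "PARTITI" */
		return 0;
	}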
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c3a76af9f5fa..fac377320158 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2233,7 +2233,7 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
return -ENOMEM;
}
- nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
+ nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
TARGET_PROT_NORMAL, name, nexus,
NULL);
if (IS_ERR(nexus->se_sess)) {
@@ -2267,8 +2267,7 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(se_sess);
- transport_deregister_session(se_sess);
+ target_remove_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);
@@ -3928,7 +3927,6 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
const char *name)
{
struct ibmvscsis_tport *tport =
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 87c94191033b..8c6627bc8a39 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -892,7 +892,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
/* Check for optional message byte */
if (imm_wait(dev) == (unsigned char) 0xb8)
imm_in(dev, &h, 1);
- cmd->result = (DID_OK << 16) + (l & STATUS_MASK);
+ cmd->result = (DID_OK << 16) | (l & STATUS_MASK);
}
if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
w_ctr(ppb, 0x4);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 02d65dce74e5..f2ec80b0ffc0 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2412,6 +2412,28 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
}
/**
+ * ipr_log_sis64_service_required_error - Log a sis64 service required error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_41_error *error;
+
+ error = &hostrcb->hcam.u.error64.u.type_41_error;
+
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
+ ipr_log_hex_data(ioa_cfg, error->data,
+ be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb_error, u) +
+ offsetof(struct ipr_hostrcb_type_41_error, data)));
+}
+/**
* ipr_log_generic_error - Log an adapter error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
@@ -2586,6 +2608,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_30:
ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
break;
+ case IPR_HOST_RCB_OVERLAY_ID_41:
+ ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
+ break;
case IPR_HOST_RCB_OVERLAY_ID_1:
case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
default:
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 93570734cbfb..68afbbde54d3 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1135,6 +1135,11 @@ struct ipr_hostrcb_type_30_error {
struct ipr_hostrcb64_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_41_error {
+ u8 failure_reason[64];
+ __be32 data[200];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_error {
__be32 fd_ioasc;
struct ipr_res_addr fd_res_addr;
@@ -1173,6 +1178,7 @@ struct ipr_hostrcb64_error {
struct ipr_hostrcb_type_23_error type_23_error;
struct ipr_hostrcb_type_24_error type_24_error;
struct ipr_hostrcb_type_30_error type_30_error;
+ struct ipr_hostrcb_type_41_error type_41_error;
} u;
}__attribute__((packed, aligned (8)));
@@ -1218,6 +1224,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
+#define IPR_HOST_RCB_OVERLAY_ID_41 0x41
#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
u8 reserved1[3];
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 3f3569ec5ce3..f969a71348ef 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -59,34 +59,25 @@ static void fc_disc_restart(struct fc_disc *);
/**
* fc_disc_stop_rports() - Delete all the remote ports associated with the lport
* @disc: The discovery job to stop remote ports on
- *
- * Locking Note: This function expects that the lport mutex is locked before
- * calling it.
*/
static void fc_disc_stop_rports(struct fc_disc *disc)
{
- struct fc_lport *lport;
struct fc_rport_priv *rdata;
- lport = fc_disc_lport(disc);
+ lockdep_assert_held(&disc->disc_mutex);
- rcu_read_lock();
- list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ list_for_each_entry(rdata, &disc->rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
- rcu_read_unlock();
}
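
	The libfc conversion above (repeated across fc_disc.c, fc_lport.c and fc_rport.c below) turns "Locking Note" comments into lockdep_assert_held() calls, so the documented locking contract is actually checked at runtime when lockdep is enabled. The sketch below is only a userspace analogue of the idea: a mutex wrapper that records its owner so a callee can assert the caller already holds the lock. It is illustrative and in no way reflects how lockdep is implemented.

	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	/* A mutex that remembers its owner so callees can assert the contract. */
	struct checked_mutex {
		pthread_mutex_t lock;
		pthread_t owner;
		int locked;
	};

	static void checked_lock(struct checked_mutex *m)
	{
		pthread_mutex_lock(&m->lock);
		m->owner = pthread_self();
		m->locked = 1;
	}

	static void checked_unlock(struct checked_mutex *m)
	{
		m->locked = 0;
		pthread_mutex_unlock(&m->lock);
	}

	/* Poor man's lockdep_assert_held(): the caller must hold the mutex. */
	static void assert_held(struct checked_mutex *m)
	{
		assert(m->locked && pthread_equal(m->owner, pthread_self()));
	}

	static struct checked_mutex disc_mutex = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Analogue of fc_disc_stop_rports(): demands the mutex is already held. */
	static void stop_rports(void)
	{
		assert_held(&disc_mutex);
		printf("logging off rports with disc_mutex held\n");
	}

	int main(void)
	{
		checked_lock(&disc_mutex);
		stop_rports();
		checked_unlock(&disc_mutex);
		return 0;
	}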
/**
* fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
* @disc: The discovery object to which the RSCN applies
* @fp: The RSCN frame
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
{
@@ -101,6 +92,8 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
LIST_HEAD(disc_ports);
struct fc_disc_port *dp, *next;
+ lockdep_assert_held(&disc->disc_mutex);
+
lport = fc_disc_lport(disc);
FC_DISC_DBG(disc, "Received an RSCN event\n");
@@ -220,12 +213,11 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_disc_restart() - Restart discovery
* @disc: The discovery object to be restarted
- *
- * Locking Note: This function expects that the disc mutex
- * is already locked.
*/
static void fc_disc_restart(struct fc_disc *disc)
{
+ lockdep_assert_held(&disc->disc_mutex);
+
if (!disc->disc_callback)
return;
@@ -271,16 +263,13 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
* fc_disc_done() - Discovery has been completed
* @disc: The discovery context
* @event: The discovery completion status
- *
- * Locking Note: This function expects that the disc mutex is locked before
- * it is called. The discovery callback is then made with the lock released,
- * and the lock is re-taken before returning from this function
*/
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
struct fc_lport *lport = fc_disc_lport(disc);
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&disc->disc_mutex);
FC_DISC_DBG(disc, "Discovery complete\n");
disc->pending = 0;
@@ -294,9 +283,11 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
* discovery, reverify or log them in. Otherwise, log them out.
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
+ *
+ * We don't need to use the _rcu variant here as the rport list
+ * is protected by the disc mutex which is already held on entry.
*/
- rcu_read_lock();
- list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ list_for_each_entry(rdata, &disc->rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
if (rdata->disc_id) {
@@ -307,7 +298,6 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
}
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
@@ -360,15 +350,14 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
/**
* fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
* @lport: The discovery context
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
struct fc_frame *fp;
struct fc_lport *lport = fc_disc_lport(disc);
+ lockdep_assert_held(&disc->disc_mutex);
+
WARN_ON(!fc_lport_test_ready(lport));
disc->pending = 1;
@@ -658,8 +647,6 @@ out:
* @lport: The local port to initiate discovery on
* @rdata: remote port private data
*
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
* On failure, an error code is returned.
*/
static int fc_disc_gpn_id_req(struct fc_lport *lport,
@@ -667,6 +654,7 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->disc.disc_mutex);
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
sizeof(struct fc_ns_fid));
if (!fp)
@@ -683,14 +671,13 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
* fc_disc_single() - Discover the directory information for a single target
* @lport: The local port the remote port is associated with
* @dp: The port to rediscover
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
@@ -708,7 +695,9 @@ static void fc_disc_stop(struct fc_lport *lport)
if (disc->pending)
cancel_delayed_work_sync(&disc->disc_work);
+ mutex_lock(&disc->disc_mutex);
fc_disc_stop_rports(disc);
+ mutex_unlock(&disc->disc_mutex);
}
/**
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 21be672679fb..be83590ed955 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -237,14 +237,13 @@ static const char *fc_lport_state(struct fc_lport *lport)
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->ptp_rdata) {
fc_rport_logoff(lport->ptp_rdata);
kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
@@ -403,12 +402,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
* fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
* @lport: Fibre Channel local port receiving the RLIR
* @fp: The RLIR request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
@@ -420,9 +418,6 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
* fc_lport_recv_echo_req() - Handle received ECHO request
* @lport: The local port receiving the ECHO
* @fp: ECHO request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_echo_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -432,6 +427,8 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
void *pp;
void *dp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
fc_lport_state(lport));
@@ -456,9 +453,6 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
* fc_lport_recv_rnid_req() - Handle received Request Node ID data request
* @lport: The local port receiving the RNID
* @fp: The RNID request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -474,6 +468,8 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
u8 fmt;
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
fc_lport_state(lport));
@@ -515,12 +511,11 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
* fc_lport_recv_logo_req() - Handle received fabric LOGO request
* @lport: The local port receiving the LOGO
* @fp: The LOGO request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
@@ -553,11 +548,11 @@ EXPORT_SYMBOL(fc_fabric_login);
/**
* __fc_linkup() - Handler for transport linkup events
* @lport: The lport whose link is up
- *
- * Locking: must be called with the lp_mutex held
*/
void __fc_linkup(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (!lport->link_up) {
lport->link_up = 1;
@@ -584,11 +579,11 @@ EXPORT_SYMBOL(fc_linkup);
/**
* __fc_linkdown() - Handler for transport linkdown events
* @lport: The lport whose link is down
- *
- * Locking: must be called with the lp_mutex held
*/
void __fc_linkdown(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->link_up) {
lport->link_up = 0;
fc_lport_enter_reset(lport);
@@ -722,12 +717,11 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
/**
* fc_rport_enter_ready() - Enter the ready state and start discovery
* @lport: The local port that is ready
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered READY from state %s\n",
fc_lport_state(lport));
@@ -745,13 +739,12 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
* @lport: The local port which will have its Port ID set.
* @port_id: The new port ID.
* @fp: The frame containing the incoming request, or NULL.
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (port_id)
printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
lport->host->host_no, port_id);
@@ -801,9 +794,6 @@ EXPORT_SYMBOL(fc_lport_set_local_id);
* A received FLOGI request indicates a point-to-point connection.
* Accept it with the common service parameters indicating our N port.
* Set up to do a PLOGI if we have the higher-number WWPN.
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -816,6 +806,8 @@ static void fc_lport_recv_flogi_req(struct fc_lport *lport,
u32 remote_fid;
u32 local_fid;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
fc_lport_state(lport));
@@ -1006,12 +998,11 @@ EXPORT_SYMBOL(fc_lport_reset);
/**
* fc_lport_reset_locked() - Reset the local port w/ the lport lock held
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->dns_rdata) {
fc_rport_logoff(lport->dns_rdata);
lport->dns_rdata = NULL;
@@ -1035,12 +1026,11 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
/**
* fc_lport_enter_reset() - Reset the local port
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_reset(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
fc_lport_state(lport));
@@ -1065,12 +1055,11 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
/**
* fc_lport_enter_disabled() - Disable the local port
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
fc_lport_state(lport));
@@ -1321,14 +1310,13 @@ err:
/**
* fc_lport_enter_scr() - Send a SCR (State Change Register) request
* @lport: The local port to register for state changes
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_scr(struct fc_lport *lport)
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
fc_lport_state(lport));
@@ -1349,9 +1337,6 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
/**
* fc_lport_enter_ns() - register some object with the name server
* @lport: Fibre Channel local port to register
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1360,6 +1345,8 @@ static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
int size = sizeof(struct fc_ct_hdr);
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@@ -1419,14 +1406,13 @@ static struct fc_rport_operations fc_lport_rport_ops = {
/**
* fc_rport_enter_dns() - Create a fc_rport for the name server
* @lport: The local port requesting a remote port for the name server
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
fc_lport_state(lport));
@@ -1449,9 +1435,6 @@ err:
/**
* fc_lport_enter_ms() - management server commands
* @lport: Fibre Channel local port to register
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1461,6 +1444,8 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
size_t len;
int numattrs;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@@ -1536,14 +1521,13 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
/**
* fc_rport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
fc_lport_state(lport));
@@ -1668,15 +1652,14 @@ EXPORT_SYMBOL(fc_lport_logo_resp);
/**
* fc_rport_enter_logo() - Logout of the fabric
* @lport: The local port to be logged out
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_logo(struct fc_lport *lport)
{
struct fc_frame *fp;
struct fc_els_logo *logo;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
fc_lport_state(lport));
@@ -1811,14 +1794,13 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
/**
* fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
* @lport: Fibre Channel local port to be logged in to the fabric
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
fc_lport_state(lport));
@@ -1962,9 +1944,6 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* @job: The BSG Passthrough job
* @lport: The local port sending the request
* @did: The destination port id
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
@@ -1976,6 +1955,8 @@ static int fc_lport_els_request(struct bsg_job *job,
char *pp;
int len;
+ lockdep_assert_held(&lport->lp_mutex);
+
fp = fc_frame_alloc(lport, job->request_payload.payload_len);
if (!fp)
return -ENOMEM;
@@ -2023,9 +2004,6 @@ static int fc_lport_els_request(struct bsg_job *job,
* @lport: The local port sending the request
* @did: The destination FC-ID
* @tov: The timeout period to wait for the response
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
@@ -2036,6 +2014,8 @@ static int fc_lport_ct_request(struct bsg_job *job,
struct fc_ct_req *ct;
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
job->request_payload.payload_len);
if (!fp)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 89b1f1af2fd4..372387a450df 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -136,13 +136,13 @@ EXPORT_SYMBOL(fc_rport_lookup);
* @ids: The identifiers for the new remote port
*
* The remote port will start in the INIT state.
- *
- * Locking note: must be called with the disc_mutex held.
*/
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@@ -184,6 +184,7 @@ void fc_rport_destroy(struct kref *kref)
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
+ WARN_ON(!list_empty(&rdata->peers));
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);
@@ -245,12 +246,12 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
* fc_rport_state_enter() - Change the state of a remote port
* @rdata: The remote port whose state should change
* @new: The new state
- *
- * Locking Note: Called with the rport lock held
*/
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
enum fc_rport_state new)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (rdata->rp_state != new)
rdata->retries = 0;
rdata->rp_state = new;
@@ -469,8 +470,6 @@ EXPORT_SYMBOL(fc_rport_login);
* @rdata: The remote port to be deleted
* @event: The event to report as the reason for deletion
*
- * Locking Note: Called with the rport lock held.
- *
* Allow state change into DELETE only once.
*
* Call queue_work only if there's no event already pending.
@@ -483,6 +482,8 @@ EXPORT_SYMBOL(fc_rport_login);
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (rdata->rp_state == RPORT_ST_DELETE)
return;
@@ -546,13 +547,12 @@ EXPORT_SYMBOL(fc_rport_logoff);
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
* @rdata: The remote port that is ready
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
fc_rport_state_enter(rdata, RPORT_ST_READY);
FC_RPORT_DBG(rdata, "Port is Ready\n");
@@ -615,15 +615,14 @@ static void fc_rport_timeout(struct work_struct *work)
* @rdata: The remote port the error is happened on
* @err: The error code
*
- * Locking Note: The rport lock is expected to be held before
- * calling this routine
- *
* Reference counting: does not modify kref
*/
static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
-err, fc_rport_state(rdata), rdata->retries);
@@ -662,15 +661,14 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
- * Locking Note: The rport lock is expected to be held before
- * calling this routine
- *
* Reference counting: increments kref when scheduling retry_work
*/
static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
+ lockdep_assert_held(&rdata->rp_mutex);
+
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (err == -FC_EX_CLOSED)
goto out;
@@ -822,9 +820,6 @@ bad:
* fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
* @rdata: The remote port to send a FLOGI to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
@@ -832,6 +827,8 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (!lport->point_to_multipoint)
return fc_rport_enter_plogi(rdata);
@@ -1071,9 +1068,6 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
* fc_rport_enter_plogi() - Send Port Login (PLOGI) request
* @rdata: The remote port to send a PLOGI to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
@@ -1081,6 +1075,8 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (!fc_rport_compatible_roles(lport, rdata)) {
FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n");
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
@@ -1232,9 +1228,6 @@ put:
* fc_rport_enter_prli() - Send Process Login (PRLI) request
* @rdata: The remote port to send the PRLI request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
@@ -1247,6 +1240,8 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc4_prov *prov;
+ lockdep_assert_held(&rdata->rp_mutex);
+
/*
* If the rport is one of the well known addresses
* we skip PRLI and RTV and go straight to READY.
@@ -1372,9 +1367,6 @@ put:
* fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
* @rdata: The remote port to send the RTV request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
@@ -1382,6 +1374,8 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc_lport *lport = rdata->local_port;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
fc_rport_state(rdata));
@@ -1406,8 +1400,6 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
* fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
* @rdata: The remote port that sent the RTV request
* @in_fp: The RTV request frame
- *
- * Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@@ -1417,6 +1409,9 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_els_rtv_acc *rtv;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_RPORT_DBG(rdata, "Received RTV request\n");
fp = fc_frame_alloc(lport, sizeof(*rtv));
@@ -1460,9 +1455,6 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_logo() - Send a logout (LOGO) request
* @rdata: The remote port to send the LOGO request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
@@ -1470,6 +1462,8 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
fc_rport_state(rdata));
@@ -1548,9 +1542,6 @@ put:
* fc_rport_enter_adisc() - Send Address Discover (ADISC) request
* @rdata: The remote port to send the ADISC request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
@@ -1558,6 +1549,8 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
fc_rport_state(rdata));
@@ -1581,8 +1574,6 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
* fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
* @rdata: The remote port that sent the ADISC request
* @in_fp: The ADISC request frame
- *
- * Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@@ -1592,6 +1583,9 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_els_adisc *adisc;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_RPORT_DBG(rdata, "Received ADISC request\n");
adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
@@ -1618,9 +1612,6 @@ drop:
* fc_rport_recv_rls_req() - Handle received Read Link Status request
* @rdata: The remote port that sent the RLS request
* @rx_fp: The PRLI request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -1634,6 +1625,8 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc_host_statistics *hst;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
fc_rport_state(rdata));
@@ -1687,8 +1680,6 @@ out:
* Handle incoming ELS requests that require port login.
* The ELS opcode has already been validated by the caller.
*
- * Locking Note: Called with the lport lock held.
- *
* Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -1696,6 +1687,8 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
+ lockdep_assert_held(&lport->lp_mutex);
+
rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
if (!rdata) {
FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
@@ -1783,14 +1776,14 @@ busy:
* @lport: The local port that received the request
* @fp: The request frame
*
- * Locking Note: Called with the lport lock held.
- *
* Reference counting: does not modify kref
*/
void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
+ lockdep_assert_held(&lport->lp_mutex);
+
/*
* Handle FLOGI, PLOGI and LOGO requests separately, since they
* don't require prior login.
@@ -1831,8 +1824,6 @@ EXPORT_SYMBOL(fc_rport_recv_req);
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
*
- * Locking Note: The rport lock is held before calling this function.
- *
* Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
@@ -1845,6 +1836,8 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_seq_els_data rjt_data;
u32 sid;
+ lockdep_assert_held(&lport->lp_mutex);
+
sid = fc_frame_sid(fp);
FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
@@ -1955,9 +1948,6 @@ reject:
* fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
* @rdata: The remote port that sent the PRLI request
* @rx_fp: The PRLI request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -1976,6 +1966,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
@@ -2072,9 +2064,6 @@ drop:
* fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
* @rdata: The remote port that sent the PRLO request
* @rx_fp: The PRLO request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -2091,6 +2080,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
unsigned int plen;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
fc_rport_state(rdata));
@@ -2144,9 +2135,6 @@ drop:
* @lport: The local port that received the LOGO request
* @fp: The LOGO request frame
*
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
- *
* Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -2154,6 +2142,8 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
u32 sid;
+ lockdep_assert_held(&lport->lp_mutex);
+
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
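
The libfc remote-port hunks above replace the free-form "Locking Note:" kernel-doc lines with lockdep_assert_held() calls, so the locking contract is checked at runtime whenever lockdep is enabled instead of only being documented. A minimal sketch of the pattern, using made-up names rather than libfc's:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_port {
        struct mutex rp_mutex;
        int state;
};

/* Caller must hold port->rp_mutex; lockdep warns at runtime if it does not. */
static void demo_set_state(struct demo_port *port, int new_state)
{
        lockdep_assert_held(&port->rp_mutex);

        port->state = new_state;
}

With CONFIG_LOCKDEP disabled the assertion compiles down to nothing, so the documented rule costs nothing in production builds.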
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c972cc2b3d5b..93c66ebad907 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1705,6 +1705,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
sc->result = DID_NO_CONNECT << 16;
break;
}
+ /* fall through */
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1832,6 +1833,7 @@ static void iscsi_tmf_timedout(struct timer_list *t)
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
struct iscsi_tm *hdr, int age,
int timeout)
+ __must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
struct iscsi_task *task;
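
The libiscsi changes are annotations rather than behaviour changes: the "/* fall through */" comment marks the deliberate drop from the preceding case into ISCSI_STATE_IN_RECOVERY (the convention that fall-through static checking looks for), and __must_hold() tells sparse that iscsi_exec_task_mgmt_fn() runs with session->frwd_lock held on entry and exit. A rough sketch of both annotations; the demo_* names are invented:

#include <linux/spinlock.h>

enum { DEMO_STATE_FAILED, DEMO_STATE_IN_RECOVERY, DEMO_STATE_RUNNING };

struct demo_conn { spinlock_t lock; int state; bool unrecoverable; };

static int demo_classify(struct demo_conn *conn)
        __must_hold(&conn->lock)        /* sparse: caller holds conn->lock */
{
        switch (conn->state) {
        case DEMO_STATE_FAILED:
                if (conn->unrecoverable)
                        return -EIO;
                /* fall through */      /* recoverable failure, treat as recovery */
        case DEMO_STATE_IN_RECOVERY:
                return -EAGAIN;
        default:
                return 0;
        }
}

Neither annotation changes the generated code; they only make the intent visible to static checkers and reviewers.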
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 369ef8f23b24..4fcb9e65be57 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -695,7 +695,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
struct scsi_data_buffer *sdb = scsi_in(task->sc);
/*
- * Setup copy of Data-In into the Scsi_Cmnd
+ * Setup copy of Data-In into the struct scsi_cmnd
* Scatterlist case:
* We set up the iscsi_segment to point to the next
* scatterlist entry to copy to. As we go along,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index ff1d612f6fb9..64a958a99f6a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -176,7 +176,6 @@ qc_already_gone:
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
- unsigned long flags;
struct sas_task *task;
struct scatterlist *sg;
int ret = AC_ERR_SYSTEM;
@@ -187,10 +186,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
struct Scsi_Host *host = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(host->transportt);
- /* TODO: audit callers to ensure they are ready for qc_issue to
- * unconditionally re-enable interrupts
- */
- local_irq_save(flags);
+ /* TODO: we should try to remove that unlock */
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
@@ -252,7 +248,6 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
out:
spin_lock(ap->lock);
- local_irq_restore(flags);
return ret;
}
@@ -557,34 +552,46 @@ int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
struct Scsi_Host *shost = ha->core.shost;
+ struct ata_host *ata_host;
struct ata_port *ap;
int rc;
- ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
- ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
- &sata_port_info,
- shost);
+ ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
+ if (!ata_host) {
+ SAS_DPRINTK("ata host alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ ata_host_init(ata_host, ha->dev, &sas_sata_ops);
+
+ ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
if (!ap) {
SAS_DPRINTK("ata_sas_port_alloc failed.\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_host;
}
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
rc = ata_sas_port_init(ap);
- if (rc) {
- ata_sas_port_destroy(ap);
- return rc;
- }
- rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
- if (rc) {
- ata_sas_port_destroy(ap);
- return rc;
- }
+ if (rc)
+ goto destroy_port;
+
+ rc = ata_sas_tport_add(ata_host->dev, ap);
+ if (rc)
+ goto destroy_port;
+
+ found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;
return 0;
+
+destroy_port:
+ ata_sas_port_destroy(ap);
+free_host:
+ ata_host_put(ata_host);
+ return rc;
}
void sas_ata_task_abort(struct sas_task *task)
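
The sas_ata_init() rework above stops using an ata_host embedded in the domain device: the host is now kzalloc()'d, initialised with ata_host_init(), and every failure path unwinds through goto labels, with the final reference dropped by ata_host_put() (see the sas_free_device() hunk that follows). The general allocate/init/unwind shape, reduced to a sketch in which the demo_* helpers stand in for the subsystem's real init/teardown calls:

#include <linux/slab.h>

static int demo_attach(struct demo_parent *parent)
{
        struct demo_child *child;
        int rc;

        child = kzalloc(sizeof(*child), GFP_KERNEL);
        if (!child)
                return -ENOMEM;

        demo_child_init(child, parent);         /* sets up the embedded kref */

        rc = demo_child_add_port(child);
        if (rc)
                goto put_child;

        rc = demo_child_register(child);
        if (rc)
                goto del_port;

        parent->child = child;                  /* publish only after full setup */
        return 0;

del_port:
        demo_child_del_port(child);
put_child:
        demo_child_put(child);                  /* last put frees the allocation */
        return rc;
}

Note the label order: later failures fall through the earlier cleanup labels, so each step is undone exactly once and in reverse order.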
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 1ffca28fe6a8..0148ae62a52a 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -316,6 +316,8 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
ata_sas_port_destroy(dev->sata_dev.ap);
+ ata_host_put(dev->sata_dev.ata_host);
+ dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index ceab5e5c41c2..33229348dcb6 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -759,7 +759,7 @@ retry:
spin_unlock_irq(shost->host_lock);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
- __func__, atomic_read(&shost->host_busy), shost->host_failed);
+ __func__, scsi_host_busy(shost), shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
@@ -801,7 +801,7 @@ out:
goto retry;
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
- __func__, atomic_read(&shost->host_busy),
+ __func__, scsi_host_busy(shost),
shost->host_failed, tries);
}
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index cb6aa802c48e..092a971d066b 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,8 +1,8 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
-# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+# * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 20b249a649dd..e0d0da5f43d6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -840,8 +840,7 @@ struct lpfc_hba {
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
- uint32_t nvme_embed_pbde;
- uint32_t fcp_embed_pbde;
+ uint32_t cfg_enable_pbde;
uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 729d343861f4..5a25553415f8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -64,6 +64,9 @@
#define LPFC_MIN_MRQ_POST 512
#define LPFC_MAX_MRQ_POST 2048
+#define LPFC_MAX_NVME_INFO_TMP_LEN 100
+#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
+
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -158,14 +161,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
char *statep;
int i;
int len = 0;
+ char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
- len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n");
+ len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
return len;
}
if (phba->nvmet_support) {
if (!phba->targetport) {
- len = snprintf(buf, PAGE_SIZE,
+ len = scnprintf(buf, PAGE_SIZE,
"NVME Target: x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
@@ -175,135 +179,169 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
statep = "REGISTERED";
else
statep = "INIT";
- len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME Target Enabled State %s\n",
- statep);
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
- "NVME Target: lpfc",
- phba->brd_no,
- wwn_to_u64(vport->fc_portname.u.wwn),
- wwn_to_u64(vport->fc_nodename.u.wwn),
- phba->targetport->port_id);
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "\nNVME Target: Statistics\n");
+ scnprintf(tmp, sizeof(tmp),
+ "NVME Target Enabled State %s\n",
+ statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
+ "NVME Target: lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ phba->targetport->port_id);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
+ >= PAGE_SIZE)
+ goto buffer_done;
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Rcv %08x Drop %08x Abort %08x\n",
- atomic_read(&tgtp->rcv_ls_req_in),
- atomic_read(&tgtp->rcv_ls_req_drop),
- atomic_read(&tgtp->xmt_ls_abort));
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Rcv %08x Drop %08x Abort %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_drop),
+ atomic_read(&tgtp->xmt_ls_abort));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
if (atomic_read(&tgtp->rcv_ls_req_in) !=
atomic_read(&tgtp->rcv_ls_req_out)) {
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Rcv LS: in %08x != out %08x\n",
- atomic_read(&tgtp->rcv_ls_req_in),
- atomic_read(&tgtp->rcv_ls_req_out));
+ scnprintf(tmp, sizeof(tmp),
+ "Rcv LS: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_out));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x\n",
- atomic_read(&tgtp->xmt_ls_rsp),
- atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "LS: RSP Abort %08x xb %08x Err %08x\n",
- atomic_read(&tgtp->xmt_ls_rsp_aborted),
- atomic_read(&tgtp->xmt_ls_rsp_xb_set),
- atomic_read(&tgtp->xmt_ls_rsp_error));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP: Rcv %08x Defer %08x Release %08x "
- "Drop %08x\n",
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_defer),
- atomic_read(&tgtp->xmt_fcp_release),
- atomic_read(&tgtp->rcv_fcp_cmd_drop));
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp),
+ atomic_read(&tgtp->xmt_ls_drop),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
+ atomic_read(&tgtp->xmt_ls_rsp_error));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
+ atomic_read(&tgtp->xmt_fcp_release),
+ atomic_read(&tgtp->rcv_fcp_cmd_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
atomic_read(&tgtp->rcv_fcp_cmd_out)) {
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Rcv FCP: in %08x != out %08x\n",
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_out));
+ scnprintf(tmp, sizeof(tmp),
+ "Rcv FCP: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
- "drop %08x\n",
- atomic_read(&tgtp->xmt_fcp_read),
- atomic_read(&tgtp->xmt_fcp_read_rsp),
- atomic_read(&tgtp->xmt_fcp_write),
- atomic_read(&tgtp->xmt_fcp_rsp),
- atomic_read(&tgtp->xmt_fcp_drop));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
- atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
- atomic_read(&tgtp->xmt_fcp_rsp_error),
- atomic_read(&tgtp->xmt_fcp_rsp_drop));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
- atomic_read(&tgtp->xmt_fcp_rsp_aborted),
- atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
- atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "ABORT: Xmt %08x Cmpl %08x\n",
- atomic_read(&tgtp->xmt_fcp_abort),
- atomic_read(&tgtp->xmt_fcp_abort_cmpl));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
- atomic_read(&tgtp->xmt_abort_sol),
- atomic_read(&tgtp->xmt_abort_unsol),
- atomic_read(&tgtp->xmt_abort_rsp),
- atomic_read(&tgtp->xmt_abort_rsp_error));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "DELAY: ctx %08x fod %08x wqfull %08x\n",
- atomic_read(&tgtp->defer_ctx),
- atomic_read(&tgtp->defer_fod),
- atomic_read(&tgtp->defer_wqfull));
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
+ "drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_read),
+ atomic_read(&tgtp->xmt_fcp_read_rsp),
+ atomic_read(&tgtp->xmt_fcp_write),
+ atomic_read(&tgtp->xmt_fcp_rsp),
+ atomic_read(&tgtp->xmt_fcp_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
+ atomic_read(&tgtp->xmt_fcp_rsp_error),
+ atomic_read(&tgtp->xmt_fcp_rsp_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "ABORT: Xmt %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_fcp_abort),
+ atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_abort_sol),
+ atomic_read(&tgtp->xmt_abort_unsol),
+ atomic_read(&tgtp->xmt_abort_rsp),
+ atomic_read(&tgtp->xmt_abort_rsp_error));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "DELAY: ctx %08x fod %08x wqfull %08x\n",
+ atomic_read(&tgtp->defer_ctx),
+ atomic_read(&tgtp->defer_fod),
+ atomic_read(&tgtp->defer_wqfull));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* Calculate outstanding IOs */
tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
- len += snprintf(buf + len, PAGE_SIZE - len,
- "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
- "CTX Outstanding %08llx\n",
- phba->sli4_hba.nvmet_xri_cnt,
- phba->sli4_hba.nvmet_io_wait_cnt,
- phba->sli4_hba.nvmet_io_wait_total,
- tot);
-
- len += snprintf(buf+len, PAGE_SIZE-len, "\n");
- return len;
+ scnprintf(tmp, sizeof(tmp),
+ "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+ "CTX Outstanding %08llx\n\n",
+ phba->sli4_hba.nvmet_xri_cnt,
+ phba->sli4_hba.nvmet_io_wait_cnt,
+ phba->sli4_hba.nvmet_io_wait_total,
+ tot);
+ strlcat(buf, tmp, PAGE_SIZE);
+ goto buffer_done;
}
localport = vport->localport;
if (!localport) {
- len = snprintf(buf, PAGE_SIZE,
+ len = scnprintf(buf, PAGE_SIZE,
"NVME Initiator x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
lport = (struct lpfc_nvme_lport *)localport->private;
- len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
-
- spin_lock_irq(shost->host_lock);
- len += snprintf(buf + len, PAGE_SIZE - len,
- "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
- phba->brd_no,
- phba->sli4_hba.max_cfg_param.max_xri,
- phba->sli4_hba.nvme_xri_max,
- phba->sli4_hba.scsi_xri_max,
- lpfc_sli4_get_els_iocb_cnt(phba));
+ if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ rcu_read_lock();
+ scnprintf(tmp, sizeof(tmp),
+ "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+ phba->brd_no,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.nvme_xri_max,
+ phba->sli4_hba.scsi_xri_max,
+ lpfc_sli4_get_els_iocb_cnt(phba));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -311,13 +349,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
else
statep = "UNKNOWN ";
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
- "NVME LPORT lpfc",
- phba->brd_no,
- wwn_to_u64(vport->fc_portname.u.wwn),
- wwn_to_u64(vport->fc_nodename.u.wwn),
- localport->port_id, statep);
+ scnprintf(tmp, sizeof(tmp),
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
+ "NVME LPORT lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ localport->port_id, statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
rport = lpfc_ndlp_get_nrport(ndlp);
@@ -343,56 +383,77 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
/* Tab in to show lport ownership. */
- len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME RPORT ");
- if (phba->brd_no >= 10)
- len += snprintf(buf + len, PAGE_SIZE - len, " ");
-
- len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ",
- nrport->port_name);
- len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ",
- nrport->node_name);
- len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
- nrport->port_id);
+ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ if (phba->brd_no >= 10) {
+ if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
+ nrport->port_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
+ nrport->node_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "DID x%06x ",
+ nrport->port_id);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* An NVME rport can have multiple roles. */
- if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "INITIATOR ");
- if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "TARGET ");
- if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "DISCSRVC ");
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
+ if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
+ if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
+ if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
- FC_PORT_ROLE_NVME_DISCOVERY))
- len += snprintf(buf + len, PAGE_SIZE - len,
- "UNKNOWN ROLE x%x",
- nrport->port_role);
-
- len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
- /* Terminate the string. */
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ FC_PORT_ROLE_NVME_DISCOVERY)) {
+ scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
+ nrport->port_role);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "%s\n", statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- spin_unlock_irq(shost->host_lock);
+ rcu_read_unlock();
if (!lport)
- return len;
-
- len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %010x Cmpl %010x Abort %08x\n",
- atomic_read(&lport->fc4NvmeLsRequests),
- atomic_read(&lport->fc4NvmeLsCmpls),
- atomic_read(&lport->xmt_ls_abort));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
- atomic_read(&lport->xmt_ls_err),
- atomic_read(&lport->cmpl_ls_xb),
- atomic_read(&lport->cmpl_ls_err));
+ goto buffer_done;
+
+ if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
+ atomic_read(&lport->fc4NvmeLsRequests),
+ atomic_read(&lport->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
totin = 0;
totout = 0;
@@ -405,25 +466,46 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Total FCP Cmpl %016llx Issue %016llx "
- "OutIO %016llx\n",
- totin, totout, totout - totin);
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- " abort %08x noxri %08x nondlp %08x qdepth %08x "
- "wqerr %08x err %08x\n",
- atomic_read(&lport->xmt_fcp_abort),
- atomic_read(&lport->xmt_fcp_noxri),
- atomic_read(&lport->xmt_fcp_bad_ndlp),
- atomic_read(&lport->xmt_fcp_qdepth),
- atomic_read(&lport->xmt_fcp_err),
- atomic_read(&lport->xmt_fcp_wqerr));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "FCP CMPL: xb %08x Err %08x\n",
- atomic_read(&lport->cmpl_fcp_xb),
- atomic_read(&lport->cmpl_fcp_err));
+ scnprintf(tmp, sizeof(tmp),
+ "Total FCP Cmpl %016llx Issue %016llx "
+ "OutIO %016llx\n",
+ totin, totout, totout - totin);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x err %08x\n",
+ atomic_read(&lport->xmt_fcp_abort),
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_err),
+ atomic_read(&lport->xmt_fcp_wqerr));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+ strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+ len = strnlen(buf, PAGE_SIZE);
+
+ if (unlikely(len >= (PAGE_SIZE - 1))) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+ "6314 Catching potential buffer "
+ "overflow > PAGE_SIZE = %lu bytes\n",
+ PAGE_SIZE);
+ strlcpy(buf + PAGE_SIZE - 1 -
+ strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
+ LPFC_NVME_INFO_MORE_STR,
+ strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
+ + 1);
+ }
+
return len;
}
@@ -5836,6 +5918,24 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
+ } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
+ switch (phba->fc_linkspeed) {
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_100GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
@@ -5891,7 +5991,6 @@ lpfc_get_stats(struct Scsi_Host *shost)
struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
- unsigned long seconds;
int rc = 0;
/*
@@ -5992,12 +6091,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->dumped_frames = -1;
- seconds = get_seconds();
- if (seconds < psli->stats_start)
- hs->seconds_since_last_reset = seconds +
- ((unsigned long)-1 - psli->stats_start);
- else
- hs->seconds_since_last_reset = seconds - psli->stats_start;
+ hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6076,7 +6170,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
else
lso->link_events = (phba->fc_eventTag >> 1);
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6454,6 +6548,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_auto_imax = 0;
phba->initial_imax = phba->cfg_fcp_imax;
+ phba->cfg_enable_pbde = 0;
+
/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
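
Two themes in the lpfc_attr.c hunk are worth spelling out. First, lpfc_nvme_info_show() stops accumulating with "len += snprintf(buf + len, PAGE_SIZE - len, ...)" and instead formats each fragment with scnprintf() into a small stack buffer, then appends with strlcat() and bails out to buffer_done the moment the sysfs page would overflow; snprintf() returns the length it would have written, so the old arithmetic could push len past PAGE_SIZE, while scnprintf() and strlcat() keep every index in bounds. Second, lpfc_get_stats()/lpfc_reset_stats() swap the wall-clock get_seconds() for the monotonic ktime_get_seconds(), removing the manual wraparound handling for seconds_since_last_reset. A condensed sketch of the bounded-append pattern, with invented demo_* stat helpers:

#include <linux/device.h>
#include <linux/string.h>

#define DEMO_TMP_LEN 100

static ssize_t demo_info_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        char tmp[DEMO_TMP_LEN];

        buf[0] = '\0';

        scnprintf(tmp, sizeof(tmp), "requests: %u\n", demo_read_requests(dev));
        if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
                goto done;

        scnprintf(tmp, sizeof(tmp), "errors:   %u\n", demo_read_errors(dev));
        strlcat(buf, tmp, PAGE_SIZE);
done:
        /* Return strnlen(), not the strlcat() result: strlcat() reports the
         * length it tried to create, which can exceed PAGE_SIZE. */
        return strnlen(buf, PAGE_SIZE);
}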
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
index 931db52692f5..9659a8fff971 100644
--- a/drivers/scsi/lpfc/lpfc_attr.h
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index edb1a18a6414..90745feca808 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e7d95a4e8042..32347c87e3b4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 6b32b0ae7506..43cf46a3a71f 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4ae9ba425e78..bea24bc4410a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -469,7 +469,6 @@ int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d4a200ae5a6f..1cbdc892ff95 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f32eaeb2225a..30efc7bf91bd 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 5a7547f9d8d8..28e2b60fc5c0 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -150,6 +150,9 @@ struct lpfc_node_rrq {
unsigned long rrq_stop_time;
};
+#define lpfc_ndlp_check_qdepth(phba, ndlp) \
+ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri)
+
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6d84a10fef07..4dda969e947c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -5640,8 +5640,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
" mbx status x%x\n",
shdr_status, shdr_add_status, mb->mbxStatus);
- if (mb->mbxStatus && !(shdr_status &&
- shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
+ if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
+ (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
+ (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
mempool_free(pmb, phba->mbox_mem_pool);
goto error;
}
@@ -5661,6 +5662,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lcb_res = (struct fc_lcb_res_frame *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
icmd = &elsiocb->iocb;
icmd->ulpContext = lcb_context->rx_id;
icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
@@ -5669,7 +5671,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->lcb_sub_command = lcb_context->sub_command;
lcb_res->lcb_type = lcb_context->type;
+ lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
+ lcb_res->lcb_duration = lcb_context->duration;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -5712,6 +5716,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
uint32_t beacon_state)
{
struct lpfc_hba *phba = vport->phba;
+ union lpfc_sli4_cfg_shdr *cfg_shdr;
LPFC_MBOXQ_t *mbox = NULL;
uint32_t len;
int rc;
@@ -5720,6 +5725,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
if (!mbox)
return 1;
+ cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
len = sizeof(struct lpfc_mbx_set_beacon_config) -
sizeof(struct lpfc_sli4_cfg_mhdr);
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -5732,8 +5738,40 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
phba->sli4_hba.physical_port);
bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
beacon_state);
- bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
- bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
+ mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
+
+ /*
+ * Check bv1s bit before issuing the mailbox
+ * if bv1s == 1, LCB V1 supported
+ * else, LCB V0 supported
+ */
+
+ if (phba->sli4_hba.pc_sli4_params.bv1s) {
+ /* COMMON_SET_BEACON_CONFIG_V1 */
+ cfg_shdr->request.word9 = BEACON_VERSION_V1;
+ lcb_context->capability |= LCB_CAPABILITY_DURATION;
+ bf_set(lpfc_mbx_set_beacon_port_type,
+ &mbox->u.mqe.un.beacon_config, 0);
+ bf_set(lpfc_mbx_set_beacon_duration_v1,
+ &mbox->u.mqe.un.beacon_config,
+ be16_to_cpu(lcb_context->duration));
+ } else {
+ /* COMMON_SET_BEACON_CONFIG_V0 */
+ if (be16_to_cpu(lcb_context->duration) != 0) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ cfg_shdr->request.word9 = BEACON_VERSION_V0;
+ lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
+ bf_set(lpfc_mbx_set_beacon_state,
+ &mbox->u.mqe.un.beacon_config, beacon_state);
+ bf_set(lpfc_mbx_set_beacon_port_type,
+ &mbox->u.mqe.un.beacon_config, 1);
+ bf_set(lpfc_mbx_set_beacon_duration,
+ &mbox->u.mqe.un.beacon_config,
+ be16_to_cpu(lcb_context->duration));
+ }
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -5784,24 +5822,16 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
beacon->lcb_frequency,
be16_to_cpu(beacon->lcb_duration));
- if (phba->sli_rev < LPFC_SLI_REV4 ||
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2)) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
-
- if (phba->hba_flag & HBA_FCOE_MODE) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
if (beacon->lcb_sub_command != LPFC_LCB_ON &&
beacon->lcb_sub_command != LPFC_LCB_OFF) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
- if (beacon->lcb_sub_command == LPFC_LCB_ON &&
- be16_to_cpu(beacon->lcb_duration) != 0) {
+
+ if (phba->sli_rev < LPFC_SLI_REV4 ||
+ phba->hba_flag & HBA_FCOE_MODE ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+ LPFC_SLI_INTF_IF_TYPE_2)) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
@@ -5814,8 +5844,10 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
lcb_context->sub_command = beacon->lcb_sub_command;
+ lcb_context->capability = 0;
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
+ lcb_context->duration = beacon->lcb_duration;
lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
lcb_context->rx_id = cmdiocb->iocb.ulpContext;
lcb_context->ndlp = lpfc_nlp_get(ndlp);
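
The lpfc_els.c changes teach the LCB (Link Cable Beacon) path about the V1 mailbox format: when the SLI-4 parameters advertise bv1s the driver issues COMMON_SET_BEACON_CONFIG_V1 and passes the requested duration through, otherwise it falls back to V0 and rejects any request that carries a non-zero duration, and the LS_ACC now echoes the capability and duration fields back to the requester. A stripped-down sketch of that version selection, with invented demo_* types rather than the driver's mailbox structures:

#include <linux/errno.h>
#include <linux/types.h>

struct demo_beacon_cfg { int version; u16 duration; };

static int demo_fill_beacon(struct demo_beacon_cfg *cfg, bool bv1s, u16 duration)
{
        if (bv1s) {
                cfg->version  = 1;      /* V1 carries a 16-bit duration */
                cfg->duration = duration;
        } else {
                if (duration)           /* V0 cannot express a duration */
                        return -EINVAL;
                cfg->version  = 0;
                cfg->duration = 0;
        }
        return 0;
}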
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2fef54fab86d..eb71877f12f8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 08a3f1520159..009aa0eee040 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1065,14 +1065,17 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
struct fc_lcb_request_frame {
uint32_t lcb_command; /* ELS command opcode (0x81) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
-#define LPFC_LCB_ON 0x1
-#define LPFC_LCB_OFF 0x2
- uint8_t reserved[3];
-
+#define LPFC_LCB_ON 0x1
+#define LPFC_LCB_OFF 0x2
+ uint8_t reserved[2];
+ uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
-#define LPFC_LCB_GREEN 0x1
-#define LPFC_LCB_AMBER 0x2
+#define LPFC_LCB_GREEN 0x1
+#define LPFC_LCB_AMBER 0x2
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
+#define LCB_CAPABILITY_DURATION 1
+#define BEACON_VERSION_V1 1
+#define BEACON_VERSION_V0 0
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
};
@@ -1082,7 +1085,8 @@ struct fc_lcb_request_frame {
struct fc_lcb_res_frame {
uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
- uint8_t reserved[3];
+ uint8_t reserved[2];
+ uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f43f0bacb77a..083f8c8706e5 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1790,9 +1790,12 @@ struct lpfc_mbx_set_beacon_config {
#define lpfc_mbx_set_beacon_duration_SHIFT 16
#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF
#define lpfc_mbx_set_beacon_duration_WORD word4
-#define lpfc_mbx_set_beacon_status_duration_SHIFT 24
-#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF
-#define lpfc_mbx_set_beacon_status_duration_WORD word4
+
+/* COMMON_SET_BEACON_CONFIG_V1 */
+#define lpfc_mbx_set_beacon_duration_v1_SHIFT 16
+#define lpfc_mbx_set_beacon_duration_v1_MASK 0x0000FFFF
+#define lpfc_mbx_set_beacon_duration_v1_WORD word4
+ uint32_t word5; /* RESERVED */
};
struct lpfc_id_range {
@@ -2243,6 +2246,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
*/
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
+#define ADD_STATUS_INVALID_REQUEST 0x4B
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -3392,7 +3396,41 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19
-#define LPFC_NODELAY_MAX_IO 32
+
+#define cfg_bv1s_SHIFT 10
+#define cfg_bv1s_MASK 0x00000001
+#define cfg_bv1s_WORD word19
+
+ uint32_t word20;
+#define cfg_max_tow_xri_SHIFT 0
+#define cfg_max_tow_xri_MASK 0x0000ffff
+#define cfg_max_tow_xri_WORD word20
+
+ uint32_t word21; /* RESERVED */
+ uint32_t word22; /* RESERVED */
+ uint32_t word23; /* RESERVED */
+
+ uint32_t word24;
+#define cfg_frag_field_offset_SHIFT 0
+#define cfg_frag_field_offset_MASK 0x0000ffff
+#define cfg_frag_field_offset_WORD word24
+
+#define cfg_frag_field_size_SHIFT 16
+#define cfg_frag_field_size_MASK 0x0000ffff
+#define cfg_frag_field_size_WORD word24
+
+ uint32_t word25;
+#define cfg_sgl_field_offset_SHIFT 0
+#define cfg_sgl_field_offset_MASK 0x0000ffff
+#define cfg_sgl_field_offset_WORD word25
+
+#define cfg_sgl_field_size_SHIFT 16
+#define cfg_sgl_field_size_MASK 0x0000ffff
+#define cfg_sgl_field_size_WORD word25
+
+ uint32_t word26; /* Chain SGE initial value LOW */
+ uint32_t word27; /* Chain SGE initial value HIGH */
+#define LPFC_NODELAY_MAX_IO 32
};
#define LPFC_SET_UE_RECOVERY 0x10
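
The new lpfc_hw4.h fields (cfg_bv1s, cfg_max_tow_xri, the fragment and SGL offset/size words) all follow the driver's usual convention: each bitfield gets _SHIFT, _MASK and _WORD macros and is accessed with bf_get()/bf_set() against the containing word, as the beacon-config and lpfc_init.c hunks in this patch do with cfg_bv1s. A stand-alone illustration of the shift/mask idiom behind those macros (plain userspace C, not the driver's bf_* implementation):

#include <stdint.h>
#include <stdio.h>

#define demo_bv1s_SHIFT 10
#define demo_bv1s_MASK  0x00000001u

static uint32_t demo_get_bv1s(uint32_t word19)
{
        return (word19 >> demo_bv1s_SHIFT) & demo_bv1s_MASK;
}

static uint32_t demo_set_bv1s(uint32_t word19, uint32_t val)
{
        word19 &= ~(demo_bv1s_MASK << demo_bv1s_SHIFT);
        word19 |= (val & demo_bv1s_MASK) << demo_bv1s_SHIFT;
        return word19;
}

int main(void)
{
        uint32_t w = demo_set_bv1s(0, 1);

        printf("bv1s=%u word19=0x%08x\n", demo_get_bv1s(w), w);
        return 0;
}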
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 07ee34017d88..d48414e295a0 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 52cae87da0d2..f3cae733ae2d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10387,6 +10387,11 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
!nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+ if (!nvmet_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6424 NVMET XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
if (!nvme_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6100 NVME XRI exchange busy "
@@ -10639,6 +10644,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
+ sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -10668,18 +10674,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
}
- /* Only embed PBDE for if_type 6 */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_6) {
- phba->fcp_embed_pbde = 1;
- phba->nvme_embed_pbde = 1;
- }
-
- /* PBDE support requires xib be set */
- if (!bf_get(cfg_xib, mbx_sli4_parameters)) {
- phba->fcp_embed_pbde = 0;
- phba->nvme_embed_pbde = 0;
- }
+ /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
+ phba->cfg_enable_pbde = 0;
/*
* To support Suppress Response feature we must satisfy 3 conditions.
@@ -10713,10 +10711,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcp_embed_io = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
- "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n",
+ "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
- phba->fcp_embed_pbde, phba->fcp_embed_io,
- phba->nvme_support, phba->nvme_embed_pbde,
+ phba->cfg_enable_pbde,
+ phba->fcp_embed_io, phba->nvme_support,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 3b654ad08d1f..ea10f03437f5 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 47c02da11f01..deb094fdbb79 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 0758edb9dfe2..9c22a2c93462 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index b93e78f671fb..95d60ab5ebf9 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1a803975bcbc..bd9bce9d9974 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1062,6 +1062,9 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
+ if (vport->phba->sli_rev == LPFC_SLI_REV3)
+ ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;
/* software abort outstanding PLOGI */
lpfc_els_abort(vport->phba, ndlp);
@@ -1982,12 +1985,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_disc, nvpr))
ndlp->nlp_type |= NLP_NVME_DISCOVERY;
- /* This node is an NVME target. Adjust the command
- * queue depth on this node to not exceed the available
- * xris.
- */
- ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
-
/*
* If prli_fba is set, the Target supports FirstBurst.
* If prli_fb_sz is 0, the FirstBurst size is unlimited,
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 76a5a99605aa..028462e5994d 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1135,9 +1135,6 @@ out_err:
else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
- atomic_dec(&ndlp->cmd_pending);
-
/* Update stats and complete the IO. There is
* no need for dma unprep because the nvme_transport
* owns the dma address.
@@ -1279,6 +1276,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 9 */
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+ /* Words 13 14 15 are for PBDE support */
+
pwqeq->vport = vport;
return 0;
}
@@ -1378,7 +1377,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
data_sg = sg_next(data_sg);
sgl++;
}
- if (phba->nvme_embed_pbde) {
+ if (phba->cfg_enable_pbde) {
/* Use PBDE support for first SGL only, offset == 0 */
/* Words 13-15 */
bde = (struct ulp_bde64 *)
@@ -1394,10 +1393,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
}
- } else {
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
- memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
+ } else {
/* For this clause to be valid, the payload_length
* and sg_cnt must zero.
*/
@@ -1546,17 +1543,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
- !expedite) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6174 Fail IO, ndlp qdepth exceeded: "
- "idx %d DID %x pend %d qdepth %d\n",
- lpfc_queue_info->index, ndlp->nlp_DID,
- atomic_read(&ndlp->cmd_pending),
- ndlp->cmd_qdepth);
- atomic_inc(&lport->xmt_fcp_qdepth);
- ret = -EBUSY;
- goto out_fail;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6174 Fail IO, ndlp qdepth exceeded: "
+ "idx %d DID %x pend %d qdepth %d\n",
+ lpfc_queue_info->index, ndlp->nlp_DID,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->cmd_qdepth);
+ atomic_inc(&lport->xmt_fcp_qdepth);
+ ret = -EBUSY;
+ goto out_fail;
+ }
}
lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
@@ -1614,8 +1613,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_free_nvme_buf;
}
- atomic_inc(&ndlp->cmd_pending);
-
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_queue_info->index, ndlp->nlp_DID);
@@ -1623,7 +1620,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_inc(&lport->xmt_fcp_wqerr);
- atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 Fail IO, Could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
@@ -2378,6 +2374,11 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
+ }
return lpfc_ncmd;
}
@@ -2396,7 +2397,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
unsigned long iflag = 0;
+ if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
+ atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
+
lpfc_ncmd->nonsg_phys = 0;
+ lpfc_ncmd->ndlp = NULL;
+ lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
+
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
@@ -2687,7 +2694,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *oldrport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
- struct lpfc_nodelist *prev_ndlp;
+ struct lpfc_nodelist *prev_ndlp = NULL;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2736,23 +2743,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (oldrport) {
+ /* New remoteport record does not guarantee valid
+ * host private memory area.
+ */
+ prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
- /* Same remoteport. Just reuse. */
+ /* Same remoteport - ndlp should match.
+ * Just reuse.
+ */
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
"remoteport %p wwpn 0x%llx, "
- "Data: x%x x%x %p x%x x%06x\n",
+ "Data: x%x x%x %p %p x%x x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
+ prev_ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
return 0;
}
- prev_ndlp = rport->ndlp;
/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
@@ -2786,13 +2799,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Remoteport %p WWNN 0x%llx, "
+ "lport %p Remoteport %p rport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x, ndlp %p\n",
- lport, remote_port,
+ "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+ lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
- ndlp);
+ ndlp, prev_ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2970,7 +2983,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
u32 i, wait_cnt = 0;
- if (phba->sli_rev < LPFC_SLI_REV4)
+ if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
return;
/* Cycle through all NVME rings and make sure all outstanding
@@ -2979,6 +2992,9 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring;
+ if (!pring)
+ continue;
+
/* Retrieve everything on the txcmplq */
while (!list_empty(&pring->txcmplq)) {
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
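/*
 * Illustrative sketch of the accounting pattern introduced in the lpfc_nvme.c
 * hunks above: ndlp->cmd_pending is bumped when an NVME buffer is handed out
 * and a flag on the buffer records that fact, so the release path decrements
 * exactly once no matter which error path ran.  The names below are
 * simplified placeholders, not actual lpfc symbols.
 */
#include <linux/atomic.h>

struct io_node {
	atomic_t	cmd_pending;
};

struct io_buf {
	unsigned int	flags;
#define IO_BUF_BUMPED_QDEPTH	0x2
	struct io_node	*node;
};

static void io_buf_got(struct io_buf *buf, struct io_node *node, bool track)
{
	if (track) {			/* e.g. qdepth limiting is active */
		atomic_inc(&node->cmd_pending);
		buf->flags |= IO_BUF_BUMPED_QDEPTH;
		buf->node = node;
	}
}

static void io_buf_put(struct io_buf *buf)
{
	if ((buf->flags & IO_BUF_BUMPED_QDEPTH) && buf->node)
		atomic_dec(&buf->node->cmd_pending);
	buf->flags &= ~IO_BUF_BUMPED_QDEPTH;
	buf->node = NULL;
}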
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 04bd463dd043..cfd4719be25c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -86,6 +86,7 @@ struct lpfc_nvme_buf {
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint16_t cpu;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 7271c9d885dd..b766afe10d3d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channsel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -402,6 +402,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
return;
@@ -1116,8 +1117,17 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
ctxp->oxid, ctxp->size, smp_processor_id());
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6425 Defer rcv: no buffer xri x%x: "
+ "flg %x ste %x\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
+ return;
+ }
+
tgtp = phba->targetport->private;
- atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+ if (tgtp)
+ atomic_inc(&tgtp->rcv_fcp_cmd_defer);
/* Free the nvmebuf since a new buffer already replaced it */
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
@@ -1732,9 +1742,12 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- if (!nvmebuf || !phba->targetport) {
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
+ if (!phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO\n");
+ "6154 LS Drop IO x%x\n", oxid);
oxid = 0;
size = 0;
sid = 0;
@@ -1744,9 +1757,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
@@ -1759,8 +1770,7 @@ dropit:
lpfc_nvmeio_data(phba, "NVMET LS DROP: "
"xri x%x sz %d from %06x\n",
oxid, size, sid);
- if (nvmebuf)
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
}
ctxp->phba = phba;
@@ -1803,8 +1813,7 @@ dropit:
ctxp->oxid, rc);
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
- if (nvmebuf)
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
atomic_inc(&tgtp->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
@@ -2492,7 +2501,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
/* Word 11 - set pbde later */
- if (phba->nvme_embed_pbde) {
+ if (phba->cfg_enable_pbde) {
do_pbde = 1;
} else {
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
@@ -2607,16 +2616,19 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(cnt);
- if (do_pbde && i == 0) {
+ if (i == 0) {
bde = (struct ulp_bde64 *)&wqe->words[13];
- memset(bde, 0, sizeof(struct ulp_bde64));
- /* Words 13-15 (PBDE)*/
- bde->addrLow = sgl->addr_lo;
- bde->addrHigh = sgl->addr_hi;
- bde->tus.f.bdeSize =
- le32_to_cpu(sgl->sge_len);
- bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bde->tus.w = cpu_to_le32(bde->tus.w);
+ if (do_pbde) {
+ /* Words 13-15 (PBDE) */
+ bde->addrLow = sgl->addr_lo;
+ bde->addrHigh = sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ } else {
+ memset(bde, 0, sizeof(struct ulp_bde64));
+ }
}
sgl++;
ctxp->offset += cnt;
@@ -3105,11 +3117,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}
aerr:
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+ if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+ list_del(&ctxp->list);
+ ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
return 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 81f520abfd64..1aaff63f1f41 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a94fb9f8bb44..5c7858e735c9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -995,6 +995,11 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock(&phba->scsi_buf_list_put_lock);
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
/**
@@ -1044,6 +1049,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found)
return NULL;
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
/**
@@ -1134,7 +1144,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
+ if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
+ atomic_dec(&psb->ndlp->cmd_pending);
+ psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
phba->lpfc_release_scsi_buf(phba, psb);
}
@@ -3017,8 +3030,8 @@ out:
if (err_type == BGS_GUARD_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
@@ -3028,8 +3041,8 @@ out:
} else if (err_type == BGS_REFTAG_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3040,8 +3053,8 @@ out:
} else if (err_type == BGS_APPTAG_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3096,7 +3109,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
spin_unlock(&_dump_buf_lock);
if (lpfc_bgs_get_invalid_prof(bgstat)) {
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9072 BLKGRD: Invalid BG Profile in cmd"
" 0x%x lba 0x%llx blk cnt 0x%x "
@@ -3108,7 +3121,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
}
if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9073 BLKGRD: Invalid BG PDIF Block in cmd"
" 0x%x lba 0x%llx blk cnt 0x%x "
@@ -3124,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9055 BLKGRD: Guard Tag error in cmd"
@@ -3140,8 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3157,8 +3170,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3311,12 +3324,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/*
* Setup the first Payload BDE. For FCoE we just key off
- * Performance Hints, for FC we utilize fcp_embed_pbde.
+ * Performance Hints, for FC we use lpfc_enable_pbde.
+ * We populate words 13-15 of IOCB/WQE.
*/
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
- phba->fcp_embed_pbde) {
+ phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &(iocb_cmd->unsli3.sli3Words[5]);
bde->addrLow = first_data_sgl->addr_lo;
bde->addrHigh = first_data_sgl->addr_hi;
bde->tus.f.bdeSize =
@@ -3330,6 +3344,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
+
+ if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
+ phba->cfg_enable_pbde) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ memset(bde, 0, (sizeof(uint32_t) * 3));
+ }
}
/*
@@ -3866,7 +3887,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
out:
- cmnd->result = ScsiResult(host_status, scsi_status);
+ cmnd->result = host_status << 16 | scsi_status;
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
@@ -4019,7 +4040,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
- cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
break;
@@ -4053,14 +4074,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
lpfc_cmd->result ==
IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
- cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ cmd->result = DID_NO_CONNECT << 16;
break;
}
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = ScsiResult(DID_REQUEUE, 0);
+ cmd->result = DID_REQUEUE << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4094,16 +4115,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/* else: fall through */
default:
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
break;
}
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
- cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
- SAM_STAT_BUSY);
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
+ SAM_STAT_BUSY;
} else
- cmd->result = ScsiResult(DID_OK, 0);
+ cmd->result = DID_OK << 16;
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -4122,7 +4143,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4135,8 +4155,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_change_time = jiffies;
}
spin_unlock_irqrestore(shost->host_lock, flags);
- } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
@@ -4530,6 +4548,11 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
int err;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+
+ /* sanity check on references */
+ if (unlikely(!rdata) || unlikely(!rport))
+ goto out_fail_command;
+
err = fc_remote_port_chkready(rport);
if (err) {
cmnd->result = err;
@@ -4555,33 +4578,36 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
goto out_tgt_busy;
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
- "3377 Target Queue Full, scsi Id:%d Qdepth:%d"
- " Pending command:%d"
- " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
- " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- ndlp->nlp_sid, ndlp->cmd_qdepth,
- atomic_read(&ndlp->cmd_pending),
- ndlp->nlp_nodename.u.wwn[0],
- ndlp->nlp_nodename.u.wwn[1],
- ndlp->nlp_nodename.u.wwn[2],
- ndlp->nlp_nodename.u.wwn[3],
- ndlp->nlp_nodename.u.wwn[4],
- ndlp->nlp_nodename.u.wwn[5],
- ndlp->nlp_nodename.u.wwn[6],
- ndlp->nlp_nodename.u.wwn[7],
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
- goto out_tgt_busy;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
+ "3377 Target Queue Full, scsi Id:%d "
+ "Qdepth:%d Pending command:%d"
+ " WWNN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x, "
+ " WWPN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x",
+ ndlp->nlp_sid, ndlp->cmd_qdepth,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->nlp_nodename.u.wwn[0],
+ ndlp->nlp_nodename.u.wwn[1],
+ ndlp->nlp_nodename.u.wwn[2],
+ ndlp->nlp_nodename.u.wwn[3],
+ ndlp->nlp_nodename.u.wwn[4],
+ ndlp->nlp_nodename.u.wwn[5],
+ ndlp->nlp_nodename.u.wwn[6],
+ ndlp->nlp_nodename.u.wwn[7],
+ ndlp->nlp_portname.u.wwn[0],
+ ndlp->nlp_portname.u.wwn[1],
+ ndlp->nlp_portname.u.wwn[2],
+ ndlp->nlp_portname.u.wwn[3],
+ ndlp->nlp_portname.u.wwn[4],
+ ndlp->nlp_portname.u.wwn[5],
+ ndlp->nlp_portname.u.wwn[6],
+ ndlp->nlp_portname.u.wwn[7]);
+ goto out_tgt_busy;
+ }
}
- atomic_inc(&ndlp->cmd_pending);
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -4599,6 +4625,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->ndlp = ndlp;
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
@@ -4681,7 +4708,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
- atomic_dec(&ndlp->cmd_pending);
return SCSI_MLQUEUE_HOST_BUSY;
out_tgt_busy:
@@ -4714,7 +4740,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_buf *lpfc_cmd;
IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
- struct lpfc_sli_ring *pring_s4;
+ struct lpfc_sli_ring *pring_s4 = NULL;
int ret_val;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -4744,8 +4770,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
iocb = &lpfc_cmd->cur_iocbq;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (!(phba->cfg_fof) ||
+ (!(iocb->iocb_flag & LPFC_IO_FOF))) {
+ pring_s4 =
+ phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
+ } else {
+ iocb->hba_wqidx = 0;
+ pring_s4 = phba->sli4_hba.oas_wq->pring;
+ }
+ if (!pring_s4) {
+ ret = FAILED;
+ goto out_unlock;
+ }
+ spin_lock(&pring_s4->ring_lock);
+ }
/* the command is in process of being cancelled */
if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
@@ -4759,6 +4802,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
* see the completion before the eh fired. Just return SUCCESS.
*/
if (lpfc_cmd->pCmd != cmnd) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3170 SCSI Layer abort requested I/O has been "
"completed by LLD.\n");
@@ -4771,6 +4816,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3389 SCSI Layer I/O Abort Request is pending\n");
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
goto wait_for_cmpl;
}
@@ -4778,6 +4825,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb = __lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
ret = FAILED;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
goto out_unlock;
}
@@ -4815,14 +4864,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
+ lpfc_cmd->waitq = &waitq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
- if (pring_s4 == NULL) {
- ret = FAILED;
- goto out_unlock;
- }
/* Note: both hbalock and ring_lock must be set here */
- spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocb, 0);
spin_unlock(&pring_s4->ring_lock);
@@ -4835,6 +4879,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (ret_val == IOCB_ERROR) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring_s4->ring_lock, flags);
+ else
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* Indicate the IO is not being aborted by the driver. */
+ iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->waitq = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
@@ -4845,7 +4900,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
- lpfc_cmd->waitq = &waitq;
/* Wait for abort to complete */
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
@@ -5006,6 +5060,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->ndlp = pnode;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
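/*
 * The ScsiResult() conversions in lpfc_scsi.c above spell out the standard
 * SCSI mid-layer result encoding: driver byte in bits 31..24, host byte in
 * bits 23..16, (legacy) message byte in bits 15..8 and SCSI status in bits
 * 7..0.  Minimal sketch of that composition (the helper name is
 * hypothetical, not a kernel API):
 */
static inline u32 make_scsi_result(u8 drv, u8 host, u8 msg, u8 status)
{
	return (drv << 24) | (host << 16) | (msg << 8) | status;
}

/*
 * e.g. the block-guard error paths above set
 *	cmd->result = make_scsi_result(DRIVER_SENSE, DID_ABORT, 0,
 *				       SAM_STAT_CHECK_CONDITION);
 * which is exactly DRIVER_SENSE << 24 | DID_ABORT << 16 |
 * SAM_STAT_CHECK_CONDITION.
 */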
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c38e4da71f5f..cc99859774ff 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -134,11 +134,13 @@ struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
uint32_t timeout;
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6f3c00a233ec..9830bdb6e072 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -145,6 +145,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
uint32_t idx;
uint32_t i = 0;
uint8_t *tmp;
+ u32 if_type;
/* sanity check on queue memory */
if (unlikely(!q))
@@ -199,8 +200,14 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
q->queue_id);
} else {
bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
- bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+
+ /* Leave bits <23:16> clear for if_type 6 dpp */
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &q->phba->sli4_hba.sli_intf);
+ if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell,
+ host_index);
}
} else if (q->db_format == LPFC_DB_RING_FORMAT) {
bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
@@ -4591,7 +4598,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
/* Give the INITFF and Post time to settle. */
mdelay(100);
@@ -4638,7 +4645,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
/* Reset HBA AER if it was enabled, note hba_flag was reset above */
if (hba_aer_enabled)
@@ -9110,8 +9117,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/* Note, word 10 is already initialized to 0 */
- /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
- if (phba->fcp_embed_pbde)
+ /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
+ if (phba->cfg_enable_pbde)
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
else
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
@@ -9174,8 +9181,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/* Note, word 10 is already initialized to 0 */
- /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
- if (phba->fcp_embed_pbde)
+ /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
+ if (phba->cfg_enable_pbde)
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
else
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
@@ -10696,6 +10703,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
+ irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
+ spin_unlock_irq(&phba->hbalock);
+ goto release_iocb;
+ }
if (abort_iotag != 0 &&
abort_iotag <= phba->sli.last_iotag)
abort_iocb =
@@ -10717,6 +10730,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&phba->hbalock);
}
+release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -10773,6 +10787,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iabt = NULL;
int retval;
unsigned long iflags;
+ struct lpfc_nodelist *ndlp;
lockdep_assert_held(&phba->hbalock);
@@ -10803,9 +10818,13 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli_rev == LPFC_SLI_REV4) {
iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
iabt->un.acxri.abortContextTag = cmdiocb->iotag;
- }
- else
+ } else {
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ if (pring->ringno == LPFC_ELS_RING) {
+ ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
+ iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
+ }
+ }
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
@@ -11084,10 +11103,11 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd;
int rc = 1;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP))
+ if (iocbq->vport != vport)
return rc;
- if (iocbq->vport != vport)
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
return rc;
lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
@@ -11097,13 +11117,13 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
switch (ctx_cmd) {
case LPFC_CTX_LUN:
- if ((lpfc_cmd->rdata->pnode) &&
+ if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
(scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
rc = 0;
break;
case LPFC_CTX_TGT:
- if ((lpfc_cmd->rdata->pnode) &&
+ if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
rc = 0;
break;
@@ -11218,6 +11238,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
int errcnt = 0, ret_val = 0;
int i;
+ /* all I/Os are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
+ return errcnt;
+
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 431754195505..34b7ab69b9b4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -339,7 +339,7 @@ struct lpfc_sli {
struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
size_t iocbq_lookup_len; /* current lengs of the array */
uint16_t last_iotag; /* last allocated IOTAG */
- unsigned long stats_start; /* in seconds */
+ time64_t stats_start; /* in seconds */
struct lpfc_lnk_stat lnk_stat_offsets;
};
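/*
 * The stats_start change above is part of the y2038 cleanup: get_seconds()
 * returned wall-clock seconds as an unsigned long, which overflows in 2038
 * on 32-bit builds, while ktime_get_seconds() returns 64-bit monotonic
 * seconds and is the right tool for measuring elapsed time.  Hedged sketch
 * of the intended usage (everything other than ktime_get_seconds() is an
 * illustrative name):
 */
#include <linux/timekeeping.h>

struct link_stats {
	time64_t	stats_start;	/* in seconds */
};

static void link_stats_reset(struct link_stats *st)
{
	st->stats_start = ktime_get_seconds();
}

static time64_t link_stats_elapsed(const struct link_stats *st)
{
	return ktime_get_seconds() - st->stats_start;
}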
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index cf64aca82bd0..399c0015c546 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -490,6 +490,7 @@ struct lpfc_pc_sli4_params {
uint8_t eqav;
uint8_t cqav;
uint8_t wqsize;
+ uint8_t bv1s;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -774,7 +775,9 @@ struct lpfc_rdp_context {
struct lpfc_lcb_context {
uint8_t sub_command;
uint8_t type;
+ uint8_t capability;
uint8_t frequency;
+ uint16_t duration;
uint16_t ox_id;
uint16_t rx_id;
struct lpfc_nodelist *ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 18c23afcf46b..501249509af4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.4"
+#define LPFC_DRIVER_VERSION "12.0.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -33,5 +33,5 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \
- "Reserved. The term \"Broadcom\" refers to Broadcom Limited " \
+ "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 81bc12dedf41..1ff0f7de9105 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 62295971f66c..f4b8528dd2e7 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8e8cf1145d7f..8c7154143a4e 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -371,7 +371,7 @@ mega_runpendq(adapter_t *adapter)
* The command queuing entry point for the mid-layer.
*/
static int
-megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
+megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
adapter_t *adapter;
scb_t *scb;
@@ -425,7 +425,7 @@ static DEF_SCSI_QCMD(megaraid_queue)
* commands.
*/
static inline scb_t *
-mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd)
+mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
{
struct list_head *head = &adapter->free_list;
scb_t *scb;
@@ -457,7 +457,7 @@ mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd)
* and the channel number.
*/
static inline int
-mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
+mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
{
int tgt;
int ldrv_num;
@@ -520,7 +520,7 @@ mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
* boot settings.
*/
static scb_t *
-mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
+mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
mega_ext_passthru *epthru;
mega_passthru *pthru;
@@ -951,8 +951,8 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
* prepare a command for the scsi physical devices.
*/
static mega_passthru *
-mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
- int channel, int target)
+mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
+ int channel, int target)
{
mega_passthru *pthru;
@@ -1015,8 +1015,9 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
* commands for devices which can take extended CDBs (>10 bytes)
*/
static mega_ext_passthru *
-mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
- int channel, int target)
+mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
+ struct scsi_cmnd *cmd,
+ int channel, int target)
{
mega_ext_passthru *epthru;
@@ -1417,7 +1418,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
{
mega_ext_passthru *epthru = NULL;
struct scatterlist *sgl;
- Scsi_Cmnd *cmd = NULL;
+ struct scsi_cmnd *cmd = NULL;
mega_passthru *pthru = NULL;
mbox_t *mbox = NULL;
u8 c;
@@ -1652,14 +1653,14 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
static void
mega_rundoneq (adapter_t *adapter)
{
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
struct list_head *pos;
list_for_each(pos, &adapter->completed_list) {
struct scsi_pointer* spos = (struct scsi_pointer *)pos;
- cmd = list_entry(spos, Scsi_Cmnd, SCp);
+ cmd = list_entry(spos, struct scsi_cmnd, SCp);
cmd->scsi_done(cmd);
}
@@ -1722,7 +1723,7 @@ static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
struct scatterlist *sg;
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
int sgcnt;
int idx;
@@ -1869,7 +1870,7 @@ megaraid_info(struct Scsi_Host *host)
* aborted. All the commands issued to the F/W must complete.
*/
static int
-megaraid_abort(Scsi_Cmnd *cmd)
+megaraid_abort(struct scsi_cmnd *cmd)
{
adapter_t *adapter;
int rval;
@@ -1933,7 +1934,7 @@ megaraid_reset(struct scsi_cmnd *cmd)
* issued to the controller, abort/reset it. Otherwise return failure
*/
static int
-megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
{
struct list_head *pos, *next;
scb_t *scb;
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 18e85d9267ff..cce23a086fbe 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -191,7 +191,7 @@ typedef struct {
u32 dma_type;
u32 dma_direction;
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
dma_addr_t dma_h_bulkdata;
dma_addr_t dma_h_sgdata;
@@ -942,7 +942,7 @@ static int issue_scb(adapter_t *, scb_t *);
static int mega_setup_mailbox(adapter_t *);
static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *);
-static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *);
+static scb_t * mega_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
static void __mega_runpendq(adapter_t *);
static int issue_scb_block(adapter_t *, u_char *);
@@ -951,9 +951,9 @@ static irqreturn_t megaraid_isr_iomapped(int, void *);
static void mega_free_scb(adapter_t *, scb_t *);
-static int megaraid_abort(Scsi_Cmnd *);
-static int megaraid_reset(Scsi_Cmnd *);
-static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int);
+static int megaraid_abort(struct scsi_cmnd *);
+static int megaraid_reset(struct scsi_cmnd *);
+static int megaraid_abort_and_reset(adapter_t *, struct scsi_cmnd *, int);
static int megaraid_biosparam(struct scsi_device *, struct block_device *,
sector_t, int []);
@@ -983,9 +983,9 @@ static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t);
static int mega_support_ext_cdb(adapter_t *);
static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *,
- Scsi_Cmnd *, int, int);
+ struct scsi_cmnd *, int, int);
static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *,
- scb_t *, Scsi_Cmnd *, int, int);
+ scb_t *, struct scsi_cmnd *, int, int);
static void mega_enum_raid_scsi(adapter_t *);
static void mega_get_boot_drv(adapter_t *);
static int mega_support_random_del(adapter_t *);
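/*
 * The megaraid.c / megaraid.h changes above are a mechanical conversion from
 * the legacy typedef to the underlying structure name; the old alias was
 * essentially just
 *
 *	typedef struct scsi_cmnd Scsi_Cmnd;
 *
 * so dropping it does not change behaviour, it only moves the driver to the
 * spelling the SCSI mid-layer uses everywhere else.
 */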
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 75dc25f78336..67d356d84717 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.705.02.00-rc1"
-#define MEGASAS_RELDATE "April 4, 2018"
+#define MEGASAS_VERSION "07.706.03.00-rc1"
+#define MEGASAS_RELDATE "May 21, 2018"
/*
* Device IDs
@@ -709,7 +709,8 @@ struct MR_TARGET_PROPERTIES {
u32 max_io_size_kb;
u32 device_qdepth;
u32 sector_size;
- u8 reserved[500];
+ u8 reset_tmo;
+ u8 reserved[499];
} __packed;
/*
@@ -1400,6 +1401,19 @@ struct megasas_ctrl_info {
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
+
+ u32 size;
+ u32 pad1;
+
+ u8 reserved6[64];
+
+ u32 rsvdForAdptOp[64];
+
+ u8 reserved7[3];
+
+ u8 TaskAbortTO; /* Timeout value in seconds used by Abort Task TM */
+ u8 MaxResetTO; /* Max Supported Reset timeout in seconds. */
+ u8 reserved8[3];
} __packed;
/*
@@ -1472,6 +1486,7 @@ enum FW_BOOT_CONTEXT {
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
+#define MEGASAS_DEFAULT_TM_TIMEOUT 50
/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
@@ -1915,7 +1930,9 @@ struct MR_PRIV_DEVICE {
bool is_tm_capable;
bool tm_busy;
atomic_t r1_ldio_hint;
- u8 interface_type;
+ u8 interface_type;
+ u8 task_abort_tmo;
+ u8 target_reset_tmo;
};
struct megasas_cmd;
@@ -2291,6 +2308,8 @@ struct megasas_instance {
u8 adapter_type;
bool consistent_mask_64bit;
bool support_nvme_passthru;
+ u8 task_abort_tmo;
+ u8 max_reset_tmo;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2512,7 +2531,11 @@ int megasas_get_ctrl_info(struct megasas_instance *instance);
/* PD sequence */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
-void megasas_set_dynamic_target_properties(struct scsi_device *sdev);
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
+ bool is_target_prop);
+int megasas_get_target_prop(struct megasas_instance *instance,
+ struct scsi_device *sdev);
+
int megasas_set_crash_dump_params(struct megasas_instance *instance,
u8 crash_buf_state);
void megasas_free_host_crash_buffer(struct megasas_instance *instance);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71d97573a667..9aa9590c5373 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -120,8 +120,7 @@ static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
struct scsi_device *sdev);
-static int megasas_get_target_prop(struct megasas_instance *instance,
- struct scsi_device *sdev);
+
/*
* PCI ID table for all supported controllers
*/
@@ -1794,7 +1793,8 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
*
* Returns void
*/
-void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
+ bool is_target_prop)
{
u16 pd_index = 0, ld;
u32 device_id;
@@ -1834,6 +1834,22 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
mr_device_priv_data->is_tm_capable =
pd_sync->seq[pd_index].capability.tmCapable;
}
+
+ if (is_target_prop && instance->tgt_prop->reset_tmo) {
+ /*
+ * If FW provides a target reset timeout value, driver will use
+ * it. If not set, fallback to default values.
+ */
+ mr_device_priv_data->target_reset_tmo =
+ min_t(u8, instance->max_reset_tmo,
+ instance->tgt_prop->reset_tmo);
+ mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
+ } else {
+ mr_device_priv_data->target_reset_tmo =
+ MEGASAS_DEFAULT_TM_TIMEOUT;
+ mr_device_priv_data->task_abort_tmo =
+ MEGASAS_DEFAULT_TM_TIMEOUT;
+ }
}
/*
@@ -1967,10 +1983,10 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->reset_mutex);
-
/* This sdev property may change post OCR */
- megasas_set_dynamic_target_properties(sdev);
+ megasas_set_dynamic_target_properties(sdev, is_target_prop);
+
+ mutex_unlock(&instance->reset_mutex);
return 0;
}
@@ -2818,7 +2834,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
"SCSI command pointer: (%p)\t SCSI host state: %d\t"
" SCSI host busy: %d\t FW outstanding: %d\n",
scmd, scmd->device->host->shost_state,
- atomic_read((atomic_t *)&scmd->device->host->host_busy),
+ scsi_host_busy(scmd->device->host),
atomic_read(&instance->fw_outstanding));
/*
@@ -4720,6 +4736,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapter_operations4.support_pd_map_target_id;
instance->support_nvme_passthru =
ci->adapter_operations4.support_nvme_passthru;
+ instance->task_abort_tmo = ci->TaskAbortTO;
+ instance->max_reset_tmo = ci->MaxResetTO;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4738,6 +4756,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->secure_jbod_support ? "Yes" : "No");
dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
instance->support_nvme_passthru ? "Yes" : "No");
+ dev_info(&instance->pdev->dev,
+ "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
+ instance->task_abort_tmo, instance->max_reset_tmo);
+
break;
case DCMD_TIMEOUT:
@@ -4755,14 +4777,15 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
__func__, __LINE__);
break;
}
+ break;
case DCMD_FAILED:
megaraid_sas_kill_hba(instance);
break;
}
- megasas_return_cmd(instance, cmd);
-
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -5831,7 +5854,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
*
* Returns 0 on success non-zero on failure.
*/
-static int
+int
megasas_get_target_prop(struct megasas_instance *instance,
struct scsi_device *sdev)
{
@@ -6789,6 +6812,9 @@ megasas_resume(struct pci_dev *pdev)
goto fail_init_mfi;
}
+ if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
+ goto fail_init_mfi;
+
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
@@ -6842,12 +6868,12 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance)
{
int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
int i;
-
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
- return 1;
+ u8 adp_state;
for (i = 0; i < wait_time; i++) {
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+ adp_state = atomic_read(&instance->adprecovery);
+ if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
+ (adp_state == MEGASAS_HW_CRITICAL_ERROR))
break;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
@@ -6856,9 +6882,10 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance)
msleep(1000);
}
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
- __func__);
+ if (adp_state != MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev,
+ "%s HBA failed to become operational, adp_state %d\n",
+ __func__, adp_state);
return 1;
}
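/*
 * Sketch of the timeout selection added above: when firmware reports a
 * per-target reset timeout, the driver clamps it to the controller-wide
 * maximum; otherwise both the task-abort and target-reset timeouts fall
 * back to the 50 second default.  Function and parameter names here are
 * illustrative, not megaraid_sas symbols.
 */
#define DEFAULT_TM_TIMEOUT_SECS	50

static u8 pick_reset_tmo(u8 fw_reset_tmo, u8 ctrl_max_reset_tmo)
{
	if (fw_reset_tmo)
		return fw_reset_tmo < ctrl_max_reset_tmo ?
		       fw_reset_tmo : ctrl_max_reset_tmo;
	return DEFAULT_TM_TIMEOUT_SECS;
}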
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 94c23ad51179..c7f95bace353 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -4108,7 +4108,8 @@ megasas_tm_response_code(struct megasas_instance *instance,
*/
static int
megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
- uint channel, uint id, u16 smid_task, u8 type)
+ uint channel, uint id, u16 smid_task, u8 type,
+ struct MR_PRIV_DEVICE *mr_device_priv_data)
{
struct MR_TASK_MANAGE_REQUEST *mr_request;
struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
@@ -4119,6 +4120,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
struct fusion_context *fusion = NULL;
struct megasas_cmd_fusion *scsi_lookup;
int rc;
+ int timeout = MEGASAS_DEFAULT_TM_TIMEOUT;
struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
fusion = instance->ctrl_context;
@@ -4170,7 +4172,16 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
init_completion(&cmd_fusion->done);
megasas_fire_cmd_fusion(instance, req_desc);
- timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ timeout = mr_device_priv_data->task_abort_tmo;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ timeout = mr_device_priv_data->target_reset_tmo;
+ break;
+ }
+
+ timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ);
if (!timeleft) {
dev_err(&instance->pdev->dev,
@@ -4363,7 +4374,8 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
@@ -4435,7 +4447,8 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
out:
@@ -4490,6 +4503,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
u32 io_timeout_in_crash_mode = 0;
struct scsi_cmnd *scmd_local = NULL;
struct scsi_device *sdev;
+ int ret_target_prop = DCMD_FAILED;
+ bool is_target_prop = false;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -4661,9 +4676,6 @@ transition_to_ready:
megasas_setup_jbod_map(instance);
- shost_for_each_device(sdev, shost)
- megasas_set_dynamic_target_properties(sdev);
-
/* reset stream detection array */
if (instance->adapter_type == VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
@@ -4677,6 +4689,16 @@ transition_to_ready:
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
instance->instancet->enable_intr(instance);
+
+ shost_for_each_device(sdev, shost) {
+ if ((instance->tgt_prop) &&
+ (instance->nvme_page_size))
+ ret_target_prop = megasas_get_target_prop(instance, sdev);
+
+ is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+ megasas_set_dynamic_target_properties(sdev, is_target_prop);
+ }
+
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
dev_info(&instance->pdev->dev, "Interrupts are enabled and"
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 1753e42826dd..82e01dbe90af 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -594,9 +594,9 @@ static void mesh_done(struct mesh_state *ms, int start_next)
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
- cmd->result = (ms->stat << 16) + cmd->SCp.Status;
+ cmd->result = (ms->stat << 16) | cmd->SCp.Status;
if (ms->stat == DID_OK)
- cmd->result += (cmd->SCp.Message << 8);
+ cmd->result |= cmd->SCp.Message << 8;
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e44c91edf92d..59d7844ee022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -102,8 +102,39 @@ static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
/**
+ * mpt3sas_base_check_cmd_timeout - Function
+ * to check timeout and command termination due
+ * to Host reset.
+ *
+ * @ioc: per adapter object.
+ * @status: Status of issued command.
+ * @mpi_request:mf request pointer.
+ * @sz: size of buffer.
+ *
+ * @Returns - 1/0 Reset to be done or Not
+ */
+u8
+mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ u8 status, void *mpi_request, int sz)
+{
+ u8 issue_reset = 0;
+
+ if (!(status & MPT3_CMD_RESET))
+ issue_reset = 1;
+
+ pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
+ ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+ _debug_dump_mf(mpi_request, sz);
+
+ return issue_reset;
+}
+
+/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ * @val: ?
+ * @kp: ?
*
+ * Return: ?
*/
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
@@ -132,8 +163,6 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
* @ioc: per adapter object
* @reply: reply message frame(lower 32bit addr)
* @index: System request message index.
- *
- * @Returns - Nothing
*/
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
@@ -156,7 +185,7 @@ _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
* _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
* to system/BAR0 region.
*
- * @dst_iomem: Pointer to the destinaltion location in BAR0 space.
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
* @src: Pointer to the Source data.
* @size: Size of data to be copied.
*/
@@ -197,7 +226,7 @@ _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * @Return: chain address.
+ * Return: the chain address.
*/
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -223,7 +252,7 @@ _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * @Return - Physical chain address.
+ * Return: Physical chain address.
*/
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -248,7 +277,7 @@ _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
*
- * @Returns - Pointer to buffer location in BAR0.
+ * Return: Pointer to buffer location in BAR0.
*/
static void __iomem *
@@ -270,7 +299,7 @@ _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * @Returns - Pointer to buffer location in BAR0.
+ * Return: Pointer to buffer location in BAR0.
*/
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -291,7 +320,7 @@ _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @chain_buffer_dma: Chain buffer dma address.
*
- * @Returns - Pointer to chain buffer. Or Null on Failure.
+ * Return: Pointer to chain buffer. Or Null on Failure.
*/
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
@@ -322,8 +351,6 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object.
* @mpi_request: mf request pointer.
* @smid: system request message index.
- *
- * @Returns: Nothing.
*/
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
void *mpi_request, u16 smid)
@@ -496,8 +523,9 @@ eob_clone_chain:
* mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
* @arg: input argument, used to derive ioc
*
- * Return 0 if controller is removed from pci subsystem.
- * Return -1 for other case.
+ * Return:
+ * 0 if controller is removed from pci subsystem.
+ * -1 for other case.
*/
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
@@ -517,9 +545,8 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
static void
_base_fault_reset_work(struct work_struct *work)
@@ -610,9 +637,8 @@ _base_fault_reset_work(struct work_struct *work)
/**
* mpt3sas_base_start_watchdog - start the fault_reset_work_q
* @ioc: per adapter object
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
@@ -633,7 +659,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->fault_reset_work_q) {
pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
ioc->name, __func__, __LINE__);
- return;
+ return;
}
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->fault_reset_work_q)
@@ -646,9 +672,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
* @ioc: per adapter object
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
@@ -671,8 +696,6 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_fault_info - verbose translation of firmware FAULT code
* @ioc: per adapter object
* @fault_code: fault code
- *
- * Return nothing.
*/
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
@@ -721,8 +744,6 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @request_hdr: request mf
- *
- * Return nothing.
*/
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
@@ -945,8 +966,6 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
 * _base_display_event_data - verbose translation of firmware async events
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
- *
- * Return nothing.
*/
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
@@ -1065,8 +1084,6 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* _base_sas_log_info - verbose translation of firmware log info
* @ioc: per adapter object
* @log_info: log info
- *
- * Return nothing.
*/
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
@@ -1124,8 +1141,6 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
* @smid: system request message index
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
- *
- * Return nothing.
*/
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1167,8 +1182,9 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return:
+ * 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1200,8 +1216,9 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return:
+ * 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
@@ -1279,7 +1296,7 @@ _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Return callback index.
+ * Return: callback index.
*/
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -1312,8 +1329,6 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
*
* Disabling ResetIRQ, Reply and Doorbell Interrupts
- *
- * Return nothing.
*/
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
@@ -1332,8 +1347,6 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Enabling only Reply Interrupts
- *
- * Return nothing.
*/
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
@@ -1358,9 +1371,8 @@ union reply_descriptor {
* _base_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
- * @r: pt_regs pointer (not used)
*
- * Return IRQ_HANDLE if processed, else IRQ_NONE.
+ * Return: IRQ_HANDLED if processed, else IRQ_NONE.
*/
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
@@ -1535,6 +1547,7 @@ _base_interrupt(int irq, void *bus_id)
 * _base_is_controller_msix_enabled - is the controller's multi-reply queue support enabled
* @ioc: per adapter object
*
+ * Return: Whether or not MSI/X is enabled.
*/
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
@@ -1549,8 +1562,6 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
 * Context: non-ISR context
*
* Called when a Task Management request has completed.
- *
- * Return nothing.
*/
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
@@ -1577,8 +1588,6 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_release_callback_handler - clear interrupt callback handler
* @cb_idx: callback index
- *
- * Return nothing.
*/
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
@@ -1590,7 +1599,7 @@ mpt3sas_base_release_callback_handler(u8 cb_idx)
* mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
* @cb_func: callback function
*
- * Returns cb_func.
+ * Return: Index of @cb_func.
*/
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
@@ -1607,8 +1616,6 @@ mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
/**
* mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
- *
- * Return nothing.
*/
void
mpt3sas_base_initialize_callback_handler(void)
@@ -1628,8 +1635,6 @@ mpt3sas_base_initialize_callback_handler(void)
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
* something to use if the target device goes brain dead and tries
* to send data even when none is asked for.
- *
- * Return nothing.
*/
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
@@ -1646,8 +1651,6 @@ _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
* @paddr: virtual address for SGE
* @flags_length: SGE flags and data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
@@ -1666,8 +1669,6 @@ _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @paddr: virtual address for SGE
* @flags_length: SGE flags and data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
@@ -1685,7 +1686,7 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @ioc: per adapter object
* @scmd: SCSI commands of the IO request
*
- * Returns chain tracker from chain_lookup table using key as
+ * Return: chain tracker from chain_lookup table using key as
* smid and smid's chain_offset.
*/
static struct chain_tracker *
@@ -1715,8 +1716,6 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Return nothing.
*/
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
@@ -1777,7 +1776,7 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* describes the first data memory segment, and PRP2 contains a pointer to a PRP
* list located elsewhere in memory to describe the remaining data memory
* segments. The PRP list will be contiguous.
-
+ *
* The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
* physical memory segments as a single memory buffer, just as a SGL does. Note
@@ -1820,8 +1819,6 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Returns nothing.
*/
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -1836,6 +1833,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u32 offset, entry_len;
u32 page_mask_result, page_mask;
size_t length;
+ struct mpt3sas_nvme_cmd *nvme_cmd =
+ (void *)nvme_encap_request->NVMe_Command;
/*
* Not all commands require a data transfer. If no data, just return
@@ -1843,15 +1842,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*/
if (!data_in_sz && !data_out_sz)
return;
- /*
- * Set pointers to PRP1 and PRP2, which are in the NVMe command.
- * PRP1 is located at a 24 byte offset from the start of the NVMe
- * command. Then set the current PRP entry pointer to PRP1.
- */
- prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
- NVME_CMD_PRP1_OFFSET);
- prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
- NVME_CMD_PRP2_OFFSET);
+ prp1_entry = &nvme_cmd->prp1;
+ prp2_entry = &nvme_cmd->prp2;
prp_entry = prp1_entry;
/*
* For the PRP entries, use the specially allocated buffer of
@@ -1992,7 +1984,7 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @smid: msg Index
* @sge_count: scatter gather element count.
*
- * Returns: true: PRPs are built
+ * Return: true: PRPs are built
* false: IEEE SGLs needs to be built
*/
static void
@@ -2127,11 +2119,9 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
u32 data_length = 0;
- struct scatterlist *sg_scmd;
bool build_prp = true;
data_length = scsi_bufflen(scmd);
- sg_scmd = scsi_sglist(scmd);
	/* If the data length is <= 16K and the number of SGEs is <= 2,
	 * we build an IEEE SGL
@@ -2155,18 +2145,16 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
* @scmd: scsi command
* @pcie_device: points to the PCIe device's info
*
- * Returns 0 if native SGL was built, 1 if no SGL was built
+ * Return: 0 if native SGL was built, 1 if no SGL was built
*/
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
struct _pcie_device *pcie_device)
{
- struct scatterlist *sg_scmd;
int sges_left;
/* Get the SG list pointer and info. */
- sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
@@ -2201,8 +2189,6 @@ out:
* @chain_offset: number of 128 byte elements from start of segment
* @length: data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
@@ -2224,8 +2210,6 @@ _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
* something to use if the target device goes brain dead and tries
* to send data even when none is asked for.
- *
- * Return nothing.
*/
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
@@ -2249,7 +2233,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
* The main routine that builds scatter gather table from a given
* scsi request sent via the .queuecommand main handler.
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error
*/
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
@@ -2394,7 +2378,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
* The main routine that builds scatter gather table from a given
* scsi request sent via the .queuecommand main handler.
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error
*/
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
@@ -2525,8 +2509,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Return nothing.
*/
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
@@ -2576,7 +2558,7 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
* @ioc: per adapter object
* @pdev: PCI device struct
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
@@ -2924,10 +2906,9 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->combined_reply_queue) {
- kfree(ioc->replyPostRegisterIndex);
- ioc->replyPostRegisterIndex = NULL;
- }
+ kfree(ioc->replyPostRegisterIndex);
+ ioc->replyPostRegisterIndex = NULL;
+
if (ioc->chip_phys) {
iounmap(ioc->chip);
@@ -2945,7 +2926,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
@@ -3034,7 +3015,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
- if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
+ if (ioc->combined_reply_queue) {
/* Determine the Supplemental Reply Post Host Index Registers
 * Addresses. Supplemental Reply Post Host Index Registers
* starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -3058,8 +3039,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
- } else
- ioc->combined_reply_queue = 0;
+ }
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -3097,7 +3077,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @smid: system request message index(smid zero is invalid)
*
- * Returns virt pointer to message frame.
+ * Return: virt pointer to message frame.
*/
void *
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3110,7 +3090,7 @@ mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns virt pointer to sense buffer.
+ * Return: virt pointer to sense buffer.
*/
void *
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3123,7 +3103,7 @@ mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns phys pointer to the low 32bit address of the sense buffer.
+ * Return: phys pointer to the low 32bit address of the sense buffer.
*/
__le32
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3137,7 +3117,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns virt pointer to a PCIe SGL.
+ * Return: virt pointer to a PCIe SGL.
*/
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3150,7 +3130,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns phys pointer to the address of the PCIe buffer.
+ * Return: phys pointer to the address of the PCIe buffer.
*/
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3184,7 +3164,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @cb_idx: callback index
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
@@ -3216,7 +3196,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
* @cb_idx: callback index
* @scmd: pointer to scsi command object
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
@@ -3239,7 +3219,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
* @ioc: per adapter object
* @cb_idx: callback index
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
@@ -3270,7 +3250,7 @@ _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
* See _wait_for_commands_to_complete() call with regards to this code.
*/
if (ioc->shost_recovery && ioc->pending_io_count) {
- ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ ioc->pending_io_count = scsi_host_busy(ioc->shost);
if (ioc->pending_io_count == 0)
wake_up(&ioc->reset_wq);
}
@@ -3284,14 +3264,13 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
st->cb_idx = 0xFF;
st->direct_io = 0;
atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
+ st->smid = 0;
}
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3353,7 +3332,6 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
/**
* _base_writeq - 64 bit write to MMIO
- * @ioc: per adapter object
* @b: data payload
* @addr: address in MMIO space
* @writeq_lock: spin lock
@@ -3382,8 +3360,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
static void
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
@@ -3412,8 +3388,6 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
static void
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
@@ -3436,8 +3410,6 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3461,7 +3433,6 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort, else 0.
- * Return nothing.
*/
void
mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3472,11 +3443,8 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u64 *request;
if (ioc->is_mcpu_endpoint) {
- MPI2RequestHeader_t *request_hdr;
-
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
- request_hdr = (MPI2RequestHeader_t *)mfp;
/* TBD 256 is offset within sys register. */
mpi_req_iomem = (void __force *)ioc->chip
+ MPI_FRAME_START_OFFSET
@@ -3507,8 +3475,6 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* firmware
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3530,8 +3496,6 @@ mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* mpt3sas_base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3539,13 +3503,10 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
Mpi2RequestDescriptorUnion_t descriptor;
void *mpi_req_iomem;
u64 *request;
- MPI2RequestHeader_t *request_hdr;
if (ioc->is_mcpu_endpoint) {
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
- request_hdr = (MPI2RequestHeader_t *)mfp;
-
_clone_sg_entries(ioc, (void *) mfp, smid);
/* TBD 256 is offset within sys register */
mpi_req_iomem = (void __force *)ioc->chip +
@@ -3571,8 +3532,6 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
@@ -3833,7 +3792,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
* version from FW Image Header.
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
@@ -3930,8 +3889,6 @@ out:
/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
@@ -4047,8 +4004,6 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
* @device_missing_delay: amount of time till device is reported missing
* @io_missing_delay: interval IO is returned when there is a missing device
*
- * Return nothing.
- *
* Passed on the command line, this function will modify the device missing
* delay, as well as the io missing delay. This should be called at driver
* load time.
@@ -4131,11 +4086,10 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
out:
kfree(sas_iounit_pg1);
}
+
/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
@@ -4207,8 +4161,6 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
 * Free memory allocated during enclosure add.
- *
- * Return nothing.
*/
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -4228,8 +4180,6 @@ mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Free memory allocated from _base_allocate_memory_pools.
- *
- * Return nothing.
*/
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
@@ -4350,9 +4300,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
*
- * Returns 1 if reply queues in a set have a same upper 32bits
- * in their base memory address,
- * else 0
+ * Return: 1 if reply queues in a set have a same upper 32bits in their base
+ * memory address, else 0.
*/
static int
@@ -4373,7 +4322,7 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error.
*/
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
@@ -4975,7 +4924,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @cooked: Request raw or cooked IOC state
*
- * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Return: all IOC Doorbell register bits if cooked==0, else just the
* Doorbell bits in MPI_IOC_STATE_MASK.
*/
u32
@@ -4990,10 +4939,11 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
/**
* _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc: ?
* @ioc_state: controller state { READY, OPERATIONAL, or RESET }
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
@@ -5021,9 +4971,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
* _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
* a write to the doorbell)
* @ioc: per adapter object
- * @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
* Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
*/
@@ -5090,7 +5039,7 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @ioc: per adapter object
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
* Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
* doorbell.
@@ -5137,8 +5086,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @ioc: per adapter object
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
- *
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5173,7 +5121,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
@@ -5222,7 +5170,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
* @reply: pointer to reply payload
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
@@ -5346,7 +5294,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
* identifying information about the device, in addition allows the host to
* remove IOC resources associated with the device.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
@@ -5355,7 +5303,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- bool issue_reset = false;
+ u8 issue_reset = 0;
int rc;
void *request;
u16 wait_state_count;
@@ -5414,12 +5362,10 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc->ioc_link_reset_in_progress)
ioc->ioc_link_reset_in_progress = 0;
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SasIoUnitControlRequest_t)/4);
- if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = true;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5449,7 +5395,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
* The SCSI Enclosure Processor request message causes the IOC to
* communicate with SES devices to control LED status signals.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
@@ -5457,7 +5403,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- bool issue_reset = false;
+ u8 issue_reset = 0;
int rc;
void *request;
u16 wait_state_count;
@@ -5510,12 +5456,10 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SepRequest_t)/4);
- if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = false;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5539,8 +5483,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
/**
* _base_get_port_facts - obtain port facts reply and save in ioc
* @ioc: per adapter object
+ * @port: ?
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
@@ -5583,7 +5528,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
* @ioc: per adapter object
* @timeout:
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5637,7 +5582,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
* _base_get_ioc_facts - obtain ioc facts reply and save in ioc
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
@@ -5681,6 +5626,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
facts->WhoInit = mpi_reply.WhoInit;
facts->NumberOfPorts = mpi_reply.NumberOfPorts;
facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ if (ioc->msix_enable && (facts->MaxMSIxVectors <=
+ MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
+ ioc->combined_reply_queue = 0;
facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
facts->MaxReplyDescriptorPostQueueDepth =
le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
@@ -5736,7 +5684,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
* _base_send_ioc_init - send ioc_init to firmware
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
@@ -5837,8 +5785,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -5883,7 +5831,7 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* _base_send_port_enable - send port_enable(discovery stuff) to firmware
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
@@ -5950,7 +5898,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
@@ -5990,7 +5938,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
* Decide whether to wait on discovery to complete. Used to either
* locate boot device, or report volumes ahead of physical devices.
*
- * Returns 1 for wait, 0 for don't wait
+ * Return: 1 for wait, 0 for don't wait.
*/
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
@@ -6062,7 +6010,7 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
* _base_event_notification - send event notification
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
@@ -6119,7 +6067,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_validate_event_type - validating event types
* @ioc: per adapter object
- * @event: firmware event
+ * @event_type: firmware event
*
 * This will turn on firmware event notification when the application
 * asks for that event. We don't mask events that are already enabled.
@@ -6157,7 +6105,7 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
* _base_diag_reset - the "big hammer" start of day reset
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -6271,7 +6219,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
@@ -6340,7 +6288,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
* _base_make_ioc_operational - put controller in OPERATIONAL state
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
@@ -6513,8 +6461,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_free_resources - free resources controller resources
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
@@ -6540,7 +6486,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_attach - attach controller instance
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
@@ -6797,8 +6743,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_detach - remove controller instance
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
@@ -6830,65 +6774,69 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_reset_handler - reset callback handler (for base)
+ * _base_pre_reset_handler - pre reset handler
* @ioc: per adapter object
- * @reset_phase: phase
- *
- * The handler for doing any required cleanup or initialization.
- *
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
- *
- * Return nothing.
*/
-static void
-_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- mpt3sas_scsih_reset_handler(ioc, reset_phase);
- mpt3sas_ctl_reset_handler(ioc, reset_phase);
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
- ioc->transport_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
- complete(&ioc->transport_cmds.done);
- }
- if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- ioc->base_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
- complete(&ioc->base_cmds.done);
- }
- if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- ioc->port_enable_failed = 1;
- ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
- if (ioc->is_driver_loading) {
- ioc->start_scan_failed =
- MPI2_IOCSTATUS_INTERNAL_ERROR;
- ioc->start_scan = 0;
- ioc->port_enable_cmds.status =
- MPT3_CMD_NOT_USED;
- } else
- complete(&ioc->port_enable_cmds.done);
- }
- if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
- ioc->config_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
- ioc->config_cmds.smid = USHRT_MAX;
- complete(&ioc->config_cmds.done);
+ mpt3sas_scsih_pre_reset_handler(ioc);
+ mpt3sas_ctl_pre_reset_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+}
+
+/**
+ * _base_after_reset_handler - after reset handler
+ * @ioc: per adapter object
+ */
+static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_after_reset_handler(ioc);
+ mpt3sas_ctl_after_reset_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT3_CMD_NOT_USED;
+ } else {
+ complete(&ioc->port_enable_cmds.done);
}
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- break;
}
+ if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ }
+}
+
+/**
+ * _base_reset_done_handler - reset done handler
+ * @ioc: per adapter object
+ */
+static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_reset_done_handler(ioc);
+ mpt3sas_ctl_reset_done_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
}
/**
@@ -6910,7 +6858,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ ioc->pending_io_count = scsi_host_busy(ioc->shost);
if (!ioc->pending_io_count)
return;
@@ -6924,7 +6872,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
@@ -6949,14 +6897,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_halt_firmware(ioc);
/* wait for an active reset in progress to complete */
- if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
- do {
- ssleep(1);
- } while (ioc->shost_recovery == 1);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
- return ioc->ioc_reset_in_progress_status;
- }
+ mutex_lock(&ioc->reset_in_progress_mutex);
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 1;
@@ -6971,13 +6912,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
is_fault = 1;
}
- _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
+ _base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
_base_mask_interrupts(ioc);
r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
- _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
+ _base_after_reset_handler(ioc);
/* If this hard reset is called while port enable is active, then
* there is no reason to call make_ioc_operational
@@ -6998,14 +6939,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
r = _base_make_ioc_operational(ioc);
if (!r)
- _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
+ _base_reset_done_handler(ioc);
out:
dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- ioc->ioc_reset_in_progress_status = r;
ioc->shost_recovery = 0;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_count++;
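Several hunks above funnel the old open-coded timeout handling (print a message, dump the message frame, test MPT3_CMD_RESET) through the new mpt3sas_base_check_cmd_timeout() helper. Below is a small stand-alone model of the decision it makes; the flag values are placeholders for illustration, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

#define MPT3_CMD_RESET 0x0008	/* illustrative value only */

/* A command that never completed either timed out (issue a host reset)
 * or was already terminated by a host reset in progress (do not issue
 * another one).
 */
static uint8_t check_cmd_timeout(uint16_t status)
{
	uint8_t issue_reset = !(status & MPT3_CMD_RESET);

	printf("Command %s\n",
	       issue_reset ? "Timeout" : "terminated due to Host Reset");
	return issue_reset;
}

int main(void)
{
	check_cmd_timeout(0);			/* timed out -> reset needed */
	check_cmd_timeout(MPT3_CMD_RESET);	/* killed by reset -> no new reset */
	return 0;
}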
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index f02974c0be4a..96dc15e90bd8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -74,8 +74,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "25.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 25
+#define MPT3SAS_DRIVER_VERSION "26.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 26
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -143,21 +143,17 @@
* NVMe defines
*/
#define NVME_PRP_SIZE 8 /* PRP size */
-#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
-#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
#define NVME_TASK_ABORT_MIN_TIMEOUT 6
#define NVME_TASK_ABORT_MAX_TIMEOUT 60
#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010)
#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
-
-/*
- * reset phases
- */
-#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
-#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */
-#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */
+struct mpt3sas_nvme_cmd {
+ u8 rsvd[24];
+ __le64 prp1;
+ __le64 prp2;
+};
/*
* logging format
@@ -323,6 +319,7 @@
* There are twelve Supplemental Reply Post Host Index Registers
* and each register is at offset 0x10 bytes from the previous one.
*/
+#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 16 : 8)
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
@@ -1162,7 +1159,6 @@ struct MPT3SAS_ADAPTER {
struct mutex reset_in_progress_mutex;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
- u8 ioc_reset_in_progress_status;
u8 ignore_loginfos;
u8 remove_host;
@@ -1482,13 +1478,17 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
+u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ u8 status, void *mpi_request, int sz);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
-void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
@@ -1615,7 +1615,9 @@ void mpt3sas_ctl_init(ushort hbas_to_enumerate);
void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
-void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
u8 msix_index, u32 reply);
void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
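The header change above introduces struct mpt3sas_nvme_cmd so PRP1/PRP2 can be reached through a typed overlay instead of the removed NVME_CMD_PRP1_OFFSET/NVME_CMD_PRP2_OFFSET byte arithmetic. A self-contained sketch of the same overlay idea follows; the struct name, buffer and addresses here are made up for illustration.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the layout added to mpt3sas_base.h: the two PRP pointers sit
 * 24 and 32 bytes into the 64-byte NVMe submission queue entry (the real
 * driver uses __le64 fields).
 */
struct nvme_cmd_overlay {
	uint8_t  rsvd[24];
	uint64_t prp1;
	uint64_t prp2;
};

int main(void)
{
	uint8_t sqe[64] = { 0 };
	struct nvme_cmd_overlay *cmd = (void *)sqe;

	cmd->prp1 = 0x1000;	/* first data segment (illustrative address) */
	cmd->prp2 = 0x2000;	/* PRP list or second segment */

	/* Equivalent to the old pointer math: sqe + 24 and sqe + 32 */
	printf("prp1 at offset %zu, prp2 at offset %zu\n",
	       offsetof(struct nvme_cmd_overlay, prp1),
	       offsetof(struct nvme_cmd_overlay, prp2));
	return 0;
}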
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index e87c76a832f6..d29a2dcc7d0e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -198,7 +198,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*
* A wrapper for obtaining dma-able memory for config page request.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
@@ -230,7 +230,7 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
*
* A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static void
_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
@@ -251,8 +251,8 @@ _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
*
* The callback handler when using _config_request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -295,7 +295,7 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
*
 * The callback index is set inside ioc->config_cb_idx.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
@@ -406,10 +406,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2ConfigRequest_t)/4);
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->config_cmds.status, mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
@@ -519,7 +518,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -556,7 +555,7 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
@@ -593,7 +592,7 @@ mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
@@ -630,7 +629,7 @@ mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
@@ -667,7 +666,7 @@ mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
@@ -708,7 +707,7 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
@@ -744,7 +743,7 @@ mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -780,7 +779,7 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -816,7 +815,7 @@ mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -852,7 +851,7 @@ mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -889,7 +888,7 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc,
@@ -924,7 +923,7 @@ mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
@@ -960,7 +959,7 @@ mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
@@ -998,7 +997,7 @@ mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1039,7 +1038,7 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1080,7 +1079,7 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1121,7 +1120,7 @@ out:
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
@@ -1159,7 +1158,7 @@ out:
* @num_phys: pointer returned with the number of phys
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
@@ -1209,7 +1208,7 @@ mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1250,7 +1249,7 @@ mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1291,7 +1290,7 @@ mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1333,7 +1332,7 @@ mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1373,7 +1372,7 @@ mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1416,7 +1415,7 @@ mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1455,7 +1454,7 @@ mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @phy_number: phy number
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1495,7 +1494,7 @@ mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @phy_number: phy number
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1536,7 +1535,7 @@ mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: volume handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1574,7 +1573,7 @@ mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
* @num_pds: returns pds count
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -1626,7 +1625,7 @@ mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1665,7 +1664,7 @@ mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
* @form_specific: specific to the form
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1704,7 +1703,7 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @volume_handle: volume handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
@@ -1794,7 +1793,7 @@ mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
* @wwid: volume wwid
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle,
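
The hunks above are purely mechanical: the free-form "Returns ..." line in each kernel-doc comment becomes a structured "Return:" section, which scripts/kernel-doc recognizes and renders as a dedicated return-value paragraph rather than trailing description text. As a rough sketch of the target shape (the function below is hypothetical, not part of the driver):

	/**
	 * example_get_page - read a config page into a caller-supplied buffer
	 * @ioc: per adapter object
	 * @config_page: buffer the page is copied into
	 * Context: sleep.
	 *
	 * Return: 0 for success, non-zero for failure.
	 */
	static int example_get_page(struct MPT3SAS_ADAPTER *ioc, void *config_page);

The same convention drives the parameter fix-ups in the mpt3sas_ctl.c hunks that follow: "@arg - description" becomes "@arg: description", again so the documentation tooling can parse the lines.
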
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 3269ef43f07e..5e8c059ce2c9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -253,8 +253,8 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*
* The callback handler when using ioc->ctl_cb_idx.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -317,7 +317,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* The bitmask in ioc->event_type[] indicates which events should be
* be saved in the driver event_log. This bitmask is set by application.
*
- * Returns 1 when event should be captured, or zero means no match.
+ * Return: 1 when event should be captured, or zero means no match.
*/
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
@@ -339,8 +339,6 @@ _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
* mpt3sas_ctl_add_to_event_log - add event
* @ioc: per adapter object
* @mpi_reply: reply message frame
- *
- * Return nothing.
*/
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
@@ -395,8 +393,8 @@ mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
@@ -412,12 +410,12 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
/**
* _ctl_verify_adapter - validates ioc_number passed from application
- * @ioc: per adapter object
+ * @ioc_number: ?
* @iocpp: The ioc pointer is returned in this.
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
* MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*
- * Return (-1) means error, else ioc_number.
+ * Return: (-1) means error, else ioc_number.
*/
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
@@ -460,65 +458,74 @@ out:
/**
* mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
* @ioc: per adapter object
- * @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
- *
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
*/
-void
-mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
int i;
u8 issue_reset;
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
- mpt3sas_send_diag_release(ioc, i, &issue_reset);
- }
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+}
+
+/**
+ * mpt3sas_ctl_after_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
- ioc->ctl_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
- complete(&ioc->ctl_cmds.done);
- }
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+}
+
+/**
+ * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
- ioc->diag_buffer_status[i] |=
- MPT3_DIAG_BUFFER_IS_DIAG_RESET;
- }
- break;
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
}
}
/**
* _ctl_fasync -
- * @fd -
- * @filep -
- * @mode -
+ * @fd: ?
+ * @filep: ?
+ * @mode: ?
*
* Called when application request fasyn callback handler.
*/
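
With the reset_phase switch gone, the ctl layer now exports one handler per reset phase instead of a single callback that is told which phase it is in. The base driver is expected to call each handler at the matching point of its hard-reset sequence; that call-site change lives in mpt3sas_base.c and is not part of this excerpt, so the following is only a sketch of the assumed shape:

	/* Assumed caller shape in mpt3sas_base.c (not shown in this diff). */
	mpt3sas_ctl_pre_reset_handler(ioc);    /* release registered diag buffers     */
	/* ... hard reset the IOC ... */
	mpt3sas_ctl_after_reset_handler(ioc);  /* wake any pending ctl command, reset */
	/* ... bring the IOC back up ... */
	mpt3sas_ctl_reset_done_handler(ioc);   /* flag diag buffers as diag-reset     */
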
@@ -530,8 +537,8 @@ _ctl_fasync(int fd, struct file *filep, int mode)
/**
* _ctl_poll -
- * @file -
- * @wait -
+ * @filep: ?
+ * @wait: ?
*
*/
static __poll_t
@@ -556,10 +563,10 @@ _ctl_poll(struct file *filep, poll_table *wait)
/**
* _ctl_set_task_mid - assign an active smid to tm request
* @ioc: per adapter object
- * @karg - (struct mpt3_ioctl_command)
- * @tm_request - pointer to mf from user space
+ * @karg: (struct mpt3_ioctl_command)
+ * @tm_request: pointer to mf from user space
*
- * Returns 0 when an smid if found, else fail.
+ * Return: 0 when an smid is found, else fail.
* during failure, the reply frame is filled.
*/
static int
@@ -634,8 +641,8 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
/**
* _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
* @ioc: per adapter object
- * @karg - (struct mpt3_ioctl_command)
- * @mf - pointer to mf in user space
+ * @karg: (struct mpt3_ioctl_command)
+ * @mf: pointer to mf in user space
*/
static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
@@ -970,6 +977,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
/* drop to default case for posting the request */
}
+ /* fall through */
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
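
The new comment marks the missing break before the default label as deliberate. When the kernel is built with -Wimplicit-fallthrough, GCC suppresses its warning for switch cases that end in a comment matching "fall through" (newer kernels use the fallthrough pseudo-keyword instead). A minimal illustration, unrelated to the driver code:

	static int classify(int op)
	{
		int flags = 0;

		switch (op) {
		case 1:
			flags |= 0x1;	/* case 1 gets its own bit ...           */
			/* fall through */
		case 0:
			flags |= 0x2;	/* ... and then shares case 0's handling */
			break;
		default:
			flags = -1;
			break;
		}
		return flags;
	}
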
@@ -995,11 +1003,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->ignore_loginfos = 0;
}
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request, karg.data_sge_offset);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ karg.data_sge_offset);
goto issue_host_reset;
}
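
The open-coded timeout handling (print a timeout message, dump the request frame, decide whether a host reset is needed) that used to be repeated at every ctl command wait is collapsed into mpt3sas_base_check_cmd_timeout(). Its body lives in mpt3sas_base.c and is not part of this excerpt; judging only from the inline code it replaces and from how its return value is used, it presumably looks roughly like this:

	/* Rough reconstruction, inferred from the removed inline code; the real
	 * helper is defined in mpt3sas_base.c and may differ in detail. */
	u8
	mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
		u8 status, void *mpi_request, int sz)
	{
		u8 issue_reset = 0;

		if (!(status & MPT3_CMD_RESET))
			issue_reset = 1;	/* timed out on its own: reset needed */

		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, __func__);
		_debug_dump_mf(mpi_request, sz);

		return issue_reset;
	}
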
@@ -1114,7 +1121,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/**
* _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1168,7 +1175,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1199,7 +1206,7 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1237,7 +1244,7 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1281,7 +1288,7 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_do_reset - main handler for MPT3HARDRESET opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1419,7 +1426,7 @@ _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
/**
* _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1621,12 +1628,10 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
goto issue_host_reset;
}
@@ -1719,7 +1724,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
/**
* _ctl_diag_register - application register with driver
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
* This will allow the driver to setup any required buffers that will be
* needed by firmware to communicate with the driver.
@@ -1743,7 +1748,7 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_unregister - application unregister with driver
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
* This will allow the driver to cleanup any memory allocated for diag
* messages and to free up any resources.
@@ -1816,7 +1821,7 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_query - query relevant info associated with diag buffers
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
* The application will send only buffer_type and unique_id. Driver will
* inspect unique_id first, if valid, fill in all the info. If unique_id is
@@ -1903,8 +1908,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* mpt3sas_send_diag_release - Diag Release Message
* @ioc: per adapter object
- * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
- * @issue_reset - specifies whether host reset is required.
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset: specifies whether host reset is required.
*
*/
int
@@ -1968,12 +1973,9 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagReleaseRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- *issue_reset = 1;
+ *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
rc = -EFAULT;
goto out;
}
@@ -2009,7 +2011,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
/**
* _ctl_diag_release - request to send Diag Release Message to firmware
- * @arg - user space buffer containing ioctl content
+ * @ioc: ?
+ * @arg: user space buffer containing ioctl content
*
* This allows ownership of the specified buffer to returned to the driver,
* allowing an application to read the buffer without fear that firmware is
@@ -2098,7 +2101,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_read_buffer - request for copy of the diag buffer
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -2235,12 +2238,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
goto issue_host_reset;
}
@@ -2284,8 +2285,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
* @ioc: per adapter object
- * @cmd - ioctl opcode
- * @arg - (struct mpt3_ioctl_command32)
+ * @cmd: ioctl opcode
+ * @arg: (struct mpt3_ioctl_command32)
*
* MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
*/
@@ -2328,10 +2329,10 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
/**
* _ctl_ioctl_main - main ioctl entry point
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg - user space data buffer
- * @compat - handles 32 bit applications in 64bit os
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: user space data buffer
+ * @compat: handles 32 bit applications in 64bit os
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
* MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*/
@@ -2462,9 +2463,9 @@ out_unlock_pciaccess:
/**
* _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: ?
*/
static long
_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -2482,9 +2483,9 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/**
* _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: ?
*/
static long
_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -2500,9 +2501,9 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
/**
*_ ctl_ioctl_compat - main ioctl entry point (compat)
- * @file -
- * @cmd -
- * @arg -
+ * @file: ?
+ * @cmd: ?
+ * @arg: ?
*
* This routine handles 32 bit applications in 64bit os.
*/
@@ -2518,9 +2519,9 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
/**
*_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
- * @file -
- * @cmd -
- * @arg -
+ * @file: ?
+ * @cmd: ?
+ * @arg: ?
*
* This routine handles 32 bit applications in 64bit os.
*/
@@ -2537,8 +2538,9 @@ _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
/* scsi host attributes */
/**
* _ctl_version_fw_show - firmware version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2559,8 +2561,9 @@ static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
/**
* _ctl_version_bios_show - bios version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2583,8 +2586,9 @@ static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
/**
* _ctl_version_mpi_show - MPI (message passing interface) version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2602,8 +2606,9 @@ static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
/**
* _ctl_version_product_show - product name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2620,8 +2625,9 @@ static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
/**
* _ctl_version_nvdata_persistent_show - ndvata persistent version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2640,8 +2646,9 @@ static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
/**
* _ctl_version_nvdata_default_show - nvdata default version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2660,8 +2667,9 @@ static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
/**
* _ctl_board_name_show - board name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2678,8 +2686,9 @@ static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
/**
* _ctl_board_assembly_show - board assembly name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2696,8 +2705,9 @@ static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
/**
* _ctl_board_tracer_show - board tracer number
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2714,8 +2724,9 @@ static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
/**
* _ctl_io_delay_show - io missing delay
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is for firmware implemention for deboucing device
* removal events.
@@ -2735,8 +2746,9 @@ static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
/**
* _ctl_device_delay_show - device missing delay
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is for firmware implemention for deboucing device
* removal events.
@@ -2756,8 +2768,9 @@ static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
/**
* _ctl_fw_queue_depth_show - global credits
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is firmware queue depth limit
*
@@ -2776,8 +2789,9 @@ static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
/**
* _ctl_sas_address_show - sas address
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the controller sas address
*
@@ -2799,8 +2813,9 @@ static DEVICE_ATTR(host_sas_address, S_IRUGO,
/**
* _ctl_logging_level_show - logging level
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -2834,8 +2849,9 @@ static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
/**
* _ctl_fwfault_debug_show - show/store fwfault_debug
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* mpt3sas_fwfault_debug is command line option
* A sysfs 'read/write' shost attribute.
@@ -2870,8 +2886,9 @@ static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
/**
* _ctl_ioc_reset_count_show - ioc reset count
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is firmware queue depth limit
*
@@ -2890,8 +2907,9 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
/**
* _ctl_ioc_reply_queue_count_show - number of reply queues
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is number of reply queues
*
@@ -2918,8 +2936,9 @@ static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
/**
* _ctl_BRM_status_show - Backup Rail Monitor Status
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is number of reply queues
*
@@ -3004,8 +3023,9 @@ struct DIAG_BUFFER_START {
/**
* _ctl_host_trace_buffer_size_show - host buffer size (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -3049,8 +3069,9 @@ static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
/**
* _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*
@@ -3114,8 +3135,9 @@ static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
/**
* _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*
@@ -3200,8 +3222,9 @@ static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3224,8 +3247,10 @@ _ctl_diag_trigger_master_show(struct device *cdev,
/**
* _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3255,8 +3280,9 @@ static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3278,8 +3304,10 @@ _ctl_diag_trigger_event_show(struct device *cdev,
/**
* _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3309,8 +3337,9 @@ static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3332,8 +3361,10 @@ _ctl_diag_trigger_scsi_show(struct device *cdev,
/**
* _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3362,8 +3393,9 @@ static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3385,8 +3417,10 @@ _ctl_diag_trigger_mpi_show(struct device *cdev,
/**
* _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3450,8 +3484,9 @@ struct device_attribute *mpt3sas_host_attrs[] = {
/**
* _ctl_device_sas_address_show - sas address
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @dev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the sas address for the target
*
@@ -3471,8 +3506,9 @@ static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
/**
* _ctl_device_handle_show - device handle
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @dev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the firmware assigned device handle
*
@@ -3492,8 +3528,9 @@ static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
/**
* _ctl_device_ncq_io_prio_show - send prioritized io commands to device
- * @dev - pointer to embedded device
- * @buf - the buffer returned
+ * @dev: pointer to embedded device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' sdev attribute, only works with SATA
*/
@@ -3573,7 +3610,7 @@ static struct miscdevice gen2_ctl_dev = {
/**
* mpt3sas_ctl_init - main entry point for ctl.
- *
+ * @hbas_to_enumerate: ?
*/
void
mpt3sas_ctl_init(ushort hbas_to_enumerate)
@@ -3601,7 +3638,7 @@ mpt3sas_ctl_init(ushort hbas_to_enumerate)
/**
* mpt3sas_ctl_exit - exit point for ctl
- *
+ * @hbas_to_enumerate: ?
*/
void
mpt3sas_ctl_exit(ushort hbas_to_enumerate)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index dd738ae5c75b..53133cfd420f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -284,6 +284,8 @@ struct _scsi_io_transfer {
/**
* _scsih_set_debug_level - global setting of ioc->logging_level.
+ * @val: ?
+ * @kp: ?
*
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
@@ -311,7 +313,7 @@ module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
* @sas_address: sas address
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
@@ -325,7 +327,7 @@ _scsih_srch_boot_sas_address(u64 sas_address,
* @device_name: device name specified in INDENTIFY fram
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_device_name(u64 device_name,
@@ -340,7 +342,7 @@ _scsih_srch_boot_device_name(u64 device_name,
* @slot_number: slot number
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
@@ -356,11 +358,11 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
* @sas_address: sas address
* @device_name: device name specified in INDENTIFY fram
* @enclosure_logical_id: enclosure logical id
- * @slot_number: slot number
+ * @slot: slot number
* @form: specifies boot device form
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
@@ -398,10 +400,11 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name,
/**
* _scsih_get_sas_address - set the sas_address for given device handle
+ * @ioc: ?
* @handle: device handle
* @sas_address: sas address
*
- * Returns 0 success, non-zero when failure
+ * Return: 0 success, non-zero when failure
*/
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -710,8 +713,6 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @sas_device: per sas device object
* @sdev: scsi device struct
* @starget: scsi target struct
- *
- * Returns nothing.
*/
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
@@ -806,8 +807,6 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
* _scsih_device_remove_by_handle - removing device object by handle
* @ioc: per adapter object
* @handle: device handle
- *
- * Return nothing.
*/
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -835,8 +834,6 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_device_remove_by_sas_address - removing device object by sas address
* @ioc: per adapter object
* @sas_address: device sas_address
- *
- * Return nothing.
*/
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
@@ -1109,8 +1106,6 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
* _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
* @ioc: per adapter object
* @handle: device handle
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -1273,7 +1268,7 @@ mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* _scsih_raid_device_find_by_wwid - raid device search
* @ioc: per adapter object
- * @handle: sas device handle (assigned by firmware)
+ * @wwid: ?
* Context: Calling function should acquire ioc->raid_device_lock
*
* This searches for raid_device based on wwid, then return raid_device
@@ -1418,8 +1413,6 @@ mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
* Context: This function will acquire ioc->sas_node_lock.
*
* Adding new object to the ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
@@ -1437,7 +1430,7 @@ _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Returns 1 if end device.
+ * Return: 1 if end device.
*/
static int
_scsih_is_end_device(u32 device_info)
@@ -1456,7 +1449,7 @@ _scsih_is_end_device(u32 device_info)
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Returns 1 if nvme device.
+ * Return: 1 if nvme device.
*/
static int
_scsih_is_nvme_device(u32 device_info)
@@ -1473,7 +1466,7 @@ _scsih_is_nvme_device(u32 device_info)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns the smid stored scmd pointer.
+ * Return: the smid stored scmd pointer.
* Then will dereference the stored scmd pointer.
*/
struct scsi_cmnd *
@@ -1489,7 +1482,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
scmd = scsi_host_find_tag(ioc->shost, unique_tag);
if (scmd) {
st = scsi_cmd_priv(scmd);
- if (st->cb_idx == 0xFF)
+ if (st->cb_idx == 0xFF || st->smid == 0)
scmd = NULL;
}
}
@@ -1501,7 +1494,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @sdev: scsi device struct
* @qdepth: requested queue depth
*
- * Returns queue depth.
+ * Return: queue depth.
*/
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
@@ -1549,7 +1542,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* scsih_target_alloc - target add routine
* @starget: scsi target struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -1640,8 +1633,6 @@ scsih_target_alloc(struct scsi_target *starget)
/**
* scsih_target_destroy - target destroy routine
* @starget: scsi target struct
- *
- * Returns nothing.
*/
static void
scsih_target_destroy(struct scsi_target *starget)
@@ -1653,7 +1644,6 @@ scsih_target_destroy(struct scsi_target *starget)
struct _raid_device *raid_device;
struct _pcie_device *pcie_device;
unsigned long flags;
- struct sas_rphy *rphy;
sas_target_priv_data = starget->hostdata;
if (!sas_target_priv_data)
@@ -1693,7 +1683,6 @@ scsih_target_destroy(struct scsi_target *starget)
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- rphy = dev_to_rphy(starget->dev.parent);
sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
@@ -1720,7 +1709,7 @@ scsih_target_destroy(struct scsi_target *starget)
* scsih_slave_alloc - device add routine
* @sdev: scsi device struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -1800,8 +1789,6 @@ scsih_slave_alloc(struct scsi_device *sdev)
/**
* scsih_slave_destroy - device destroy routine
* @sdev: scsi device struct
- *
- * Returns nothing.
*/
static void
scsih_slave_destroy(struct scsi_device *sdev)
@@ -1907,7 +1894,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/**
* scsih_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
*/
static int
scsih_is_raid(struct device *dev)
@@ -1930,7 +1917,7 @@ scsih_is_nvme(struct device *dev)
/**
* scsih_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
scsih_get_resync(struct device *dev)
@@ -1991,7 +1978,7 @@ scsih_get_resync(struct device *dev)
/**
* scsih_get_state - get raid volume level
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
scsih_get_state(struct device *dev)
@@ -2057,6 +2044,7 @@ scsih_get_state(struct device *dev)
/**
* _scsih_set_level - set raid level
+ * @ioc: ?
* @sdev: scsi device struct
* @volume_type: volume type
*/
@@ -2098,9 +2086,9 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_get_volume_capabilities - volume capabilities
* @ioc: per adapter object
- * @sas_device: the raid_device object
+ * @raid_device: the raid_device object
*
- * Returns 0 for success, else 1
+ * Return: 0 for success, else 1
*/
static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
@@ -2192,7 +2180,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
* scsih_slave_configure - device configure routine.
* @sdev: scsi device struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -2256,7 +2244,7 @@ scsih_slave_configure(struct scsi_device *sdev)
ds = "SSP";
} else {
qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
- if (raid_device->device_info &
+ if (raid_device->device_info &
MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "SATA";
else
@@ -2365,13 +2353,14 @@ scsih_slave_configure(struct scsi_device *sdev)
"connector name( %s)\n", ds,
pcie_device->enclosure_level,
pcie_device->connector_name);
- pcie_device_put(pcie_device);
- spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- scsih_change_queue_depth(sdev, qdepth);
if (pcie_device->nvme_mdts)
blk_queue_max_hw_sectors(sdev->request_queue,
pcie_device->nvme_mdts/512);
+
+ pcie_device_put(pcie_device);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ scsih_change_queue_depth(sdev, qdepth);
/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
** merged and can eliminate holes created during merging
** operation.
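
This reordering is a correctness fix rather than a cleanup: pcie_device->nvme_mdts was previously dereferenced after pcie_device_put() had dropped the reference and after pcie_device_lock had been released, so the object could in principle have been freed underneath the access. After the change the function reads, in order:

	if (pcie_device->nvme_mdts)
		blk_queue_max_hw_sectors(sdev->request_queue,
			pcie_device->nvme_mdts/512);

	pcie_device_put(pcie_device);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	scsih_change_queue_depth(sdev, qdepth);

i.e. every dereference of pcie_device happens while the reference and the lock are still held, and only lock-free work follows.
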
@@ -2450,8 +2439,6 @@ scsih_slave_configure(struct scsi_device *sdev)
* params[0] number of heads (max 255)
* params[1] number of sectors (max 63)
* params[2] number of cylinders
- *
- * Return nothing.
*/
static int
scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
@@ -2493,8 +2480,6 @@ scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
* _scsih_response_code - translation of device response code
* @ioc: per adapter object
* @response_code: response code returned by the device
- *
- * Return nothing.
*/
static void
_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
@@ -2544,8 +2529,8 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
*
* The callback handler when using scsih_issue_tm.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -2640,7 +2625,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* The callback index is set inside `ioc->tm_cb_idx`.
* The caller is responsible to check for outstanding commands.
*
- * Return SUCCESS or FAILED.
+ * Return: SUCCESS or FAILED.
*/
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
@@ -2708,11 +2693,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SCSITaskManagementRequest_t)/4);
- if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
+ if (mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->tm_cmds.status, mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
rc = mpt3sas_base_hard_reset_handler(ioc,
FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
@@ -2846,7 +2829,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
* scsih_abort - eh threads main abort routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_abort(struct scsi_cmnd *scmd)
@@ -2914,7 +2897,7 @@ scsih_abort(struct scsi_cmnd *scmd)
* scsih_dev_reset - eh threads main device reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
@@ -2992,7 +2975,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
* scsih_target_reset - eh threads main target reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_target_reset(struct scsi_cmnd *scmd)
@@ -3069,7 +3052,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
* scsih_host_reset - eh threads main host reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_host_reset(struct scsi_cmnd *scmd)
@@ -3105,8 +3088,6 @@ out:
*
* This adds the firmware event object into link list, then queues it up to
* be processed from user context.
- *
- * Return nothing.
*/
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -3133,8 +3114,6 @@ _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
* Context: This function will acquire ioc->fw_event_lock.
*
* If the fw_event is on the fw_event_list, remove it and do a put.
- *
- * Return nothing.
*/
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
@@ -3155,8 +3134,6 @@ _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
* mpt3sas_send_trigger_data_event - send event for processing trigger data
* @ioc: per adapter object
* @event_data: trigger event data
- *
- * Return nothing.
*/
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
@@ -3181,8 +3158,6 @@ mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_error_recovery_delete_devices - remove devices not responding
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -3203,8 +3178,6 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_port_enable_complete - port enable completed (fake event)
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
@@ -3242,8 +3215,6 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
*
* Walk the firmware event queue, either killing timers, or waiting
* for outstanding events to complete
- *
- * Return nothing.
*/
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
@@ -3369,7 +3340,7 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_ublock_io_device - prepare device to be deleted
* @ioc: per adapter object
- * @sas_addr: sas address
+ * @sas_address: sas address
*
* unblock then put device in offline state
*/
@@ -3395,7 +3366,6 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
/**
* _scsih_block_io_all_device - set the device state to SDEV_BLOCK
* @ioc: per adapter object
- * @handle: device handle
*
* During device pull we need to appropriately set the sdev state.
*/
@@ -3730,8 +3700,8 @@ out:
* handshake protocol with controller firmware.
* It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -3822,8 +3792,8 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* This code is part of the code to initiate the device removal
* handshake protocol with controller firmware.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3909,8 +3879,8 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @reply: reply message frame(lower 32bit addr)
* Context: interrupt time.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4004,19 +3974,19 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
u16 smid, u16 handle)
- {
- Mpi2SasIoUnitControlRequest_t *mpi_request;
- u32 ioc_state;
- int i = smid - ioc->internal_smid;
- unsigned long flags;
+{
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u32 ioc_state;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host has been removed\n",
__func__, ioc->name));
- return;
- } else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host in pci error recovery\n",
__func__, ioc->name));
return;
@@ -4059,8 +4029,8 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
* This will check delayed internal messages list, and process the
* next request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4098,8 +4068,8 @@ mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* This will check delayed target reset list, and feed the
* next reqeust.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4139,8 +4109,6 @@ _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* This handles the case where driver receives multiple expander
* add and delete events in a single shot. When there is a delete event
* the routine will void any pending add events waiting in the event queue.
- *
- * Return nothing.
*/
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4222,8 +4190,6 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
* or device add and delete events in a single shot. When there
* is a delete event the routine will void any pending add
* events waiting in the event queue.
- *
- * Return nothing.
*/
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4348,8 +4314,6 @@ _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
* volume has been deleted or removed. When the target reset is sent
* to volume, the PD target resets need to be queued to start upon
* completion of the volume target reset.
- *
- * Return nothing.
*/
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4433,8 +4397,6 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
* This will handle the case when the cable connected to entire volume is
* pulled. We will take care of setting the deleted flag so normal IO will
* not be sent.
- *
- * Return nothing.
*/
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4456,8 +4418,6 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: the temp threshold event data
* Context: interrupt time.
- *
- * Return nothing.
*/
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4496,8 +4456,6 @@ static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
*
* The flushing out of all pending scmd commands following host reset,
* where all IO is dropped to the floor.
- *
- * Return nothing.
*/
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
@@ -4533,8 +4491,6 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
* @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
- *
- * Returns nothing
*/
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
@@ -4593,8 +4549,6 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* _scsih_eedp_error_handling - return sense code for EEDP errors
* @scmd: pointer to scsi command object
* @ioc_status: ioc status
- *
- * Returns nothing
*/
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
@@ -4623,12 +4577,12 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
/**
* scsih_qcmd - main scsi request entry point
+ * @shost: SCSI host pointer
* @scmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* The callback index is set inside `ioc->scsi_io_cb_idx`.
*
- * Returns 0 on success. If there's a failure, return either:
+ * Return: 0 on success. If there's a failure, return either:
* SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
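
The reworked comment makes the queuecommand contract explicit: return 0 once the command has been accepted (completion is signalled later through scmd->scsi_done()), SCSI_MLQUEUE_DEVICE_BUSY to have the midlayer requeue commands for just this device, and SCSI_MLQUEUE_HOST_BUSY to back off the whole host. A bare-bones sketch of that contract with a hypothetical driver (not mpt3sas):

	static int sketch_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
	{
		struct sketch_hba *hba = shost_priv(shost);	/* hypothetical private data */

		if (hba->host_recovering)
			return SCSI_MLQUEUE_HOST_BUSY;		/* retry later, whole host  */
		if (sketch_device_blocked(hba, scmd->device))
			return SCSI_MLQUEUE_DEVICE_BUSY;	/* retry later, this device */

		sketch_submit(hba, scmd);			/* scmd->scsi_done() is     */
		return 0;					/* called on completion     */
	}
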
@@ -4674,19 +4628,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
}
- /* host recovery or link resets sent via IOCTLs */
- if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
+ /* host recovery or link resets sent via IOCTLs */
return SCSI_MLQUEUE_HOST_BUSY;
-
- /* device has been deleted */
- else if (sas_target_priv_data->deleted) {
+ } else if (sas_target_priv_data->deleted) {
+ /* device has been deleted */
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
- /* device busy with task management */
} else if (sas_target_priv_data->tm_busy ||
- sas_device_priv_data->block)
+ sas_device_priv_data->block) {
+ /* device busy with task management */
return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
/*
* Bug work around for firmware SATL handling. The loop
@@ -4791,8 +4745,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
* @sense_buffer: sense data returned by target
* @data: normalized skey/asc/ascq
- *
- * Return nothing.
*/
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
@@ -4815,12 +4767,11 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
+ * @smid: ?
*
* scsi_status - SCSI Status code returned from target device
* scsi_state - state info associated with SCSI_IO determined by ioc
* ioc_status - ioc supplied status info
- *
- * Return nothing.
*/
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
@@ -5044,8 +4995,6 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* @ioc: per adapter object
* @handle: device handle
* Context: process
- *
- * Return nothing.
*/
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5089,8 +5038,6 @@ out:
* @ioc: per adapter object
* @sas_device: sas device whose PFA LED has to turned off
* Context: process
- *
- * Return nothing.
*/
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
@@ -5128,8 +5075,6 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5151,8 +5096,6 @@ _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5228,8 +5171,8 @@ out_unlock:
*
* Callback handler when using _scsih_qcmd.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -5416,6 +5359,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
scsi_set_resid(scmd, 0);
+ /* fall through */
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI2_IOCSTATUS_SUCCESS:
scmd->result = (DID_OK << 16) | scsi_status;
@@ -5468,8 +5412,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* During port enable, fw will send topology events for every device. Its
* possible that the handles may change from the previous setting, so this
* code keeping handles updating if changed.
- *
- * Return nothing.
*/
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
@@ -5523,8 +5465,6 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Creating host side data object, stored in ioc->sas_hba
- *
- * Return nothing.
*/
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
@@ -5672,7 +5612,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
*
* Creating expander object, stored in ioc->sas_expander_list.
*
- * Return 0 for success, else error.
+ * Return: 0 for success, else error.
*/
static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5812,7 +5752,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
_scsih_expander_node_add(ioc, sas_expander);
- return 0;
+ return 0;
out_fail:
@@ -5827,8 +5767,6 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_expander_remove - removing expander object
* @ioc: per adapter object
* @sas_address: expander sas_address
- *
- * Return nothing.
*/
void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
@@ -5857,8 +5795,8 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
* Callback handler when sending internal generated SCSI_IO.
* The callback index passed is `ioc->scsih_cb_idx`
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -5892,9 +5830,9 @@ _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* @ioc: per adapter object
* @sas_address: sas address
* @handle: sas device handle
- * @access_flags: errors returned during discovery of the device
+ * @access_status: errors returned during discovery of the device
*
- * Return 0 for success, else failure
+ * Return: 0 for success, else failure
*/
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -5956,10 +5894,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
* @ioc: per adapter object
* @parent_sas_address: sas address of parent expander or sas host
* @handle: attached device handle
- * @phy_numberv: phy number
+ * @phy_number: phy number
* @link_rate: new link rate
- *
- * Returns nothing.
*/
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
@@ -6076,7 +6012,7 @@ out_unlock:
*
* Creating end device object, stored in ioc->sas_device_list.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
@@ -6208,9 +6144,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/**
* _scsih_remove_device - removing sas device object
* @ioc: per adapter object
- * @sas_device_delete: the sas_device object
- *
- * Return nothing.
+ * @sas_device: the sas_device object
*/
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
@@ -6446,6 +6380,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
+ /* fall through */
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6475,10 +6410,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: ?
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -6546,8 +6480,6 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -6608,9 +6540,9 @@ out:
* @ioc: per adapter object
* @wwid: wwid
* @handle: sas device handle
- * @access_flags: errors returned during discovery of the device
+ * @access_status: errors returned during discovery of the device
*
- * Return 0 for success, else failure
+ * Return: 0 for success, else failure
*/
static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
@@ -6695,8 +6627,6 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
* from SML and free up associated memory
* @ioc: per adapter object
* @pcie_device: the pcie_device object
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
@@ -6770,8 +6700,6 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
* _scsih_pcie_check_device - checking device responsiveness
* @ioc: per adapter object
* @handle: attached device handle
- *
- * Returns nothing.
*/
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -6863,7 +6791,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
*
* Creating end device object, stored in ioc->pcie_device_list.
*
- * Return 1 means queue the event later, 0 means complete the event
+ * Return: 1 means queue the event later, 0 means complete the event
*/
static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -6873,7 +6801,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
Mpi2ConfigReply_t mpi_reply;
struct _pcie_device *pcie_device;
struct _enclosure_node *enclosure_dev;
- u32 pcie_device_type;
u32 ioc_status;
u64 wwid;
@@ -6935,8 +6862,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device->port_num = pcie_device_pg0.PortNum;
pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
- pcie_device_type = pcie_device->device_info &
- MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
pcie_device->enclosure_handle =
le16_to_cpu(pcie_device_pg0.EnclosureHandle);
@@ -7165,6 +7090,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
+ /* fall through */
case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
if (ioc->shost_recovery)
break;
@@ -7190,12 +7116,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _scsih_pcie_device_status_change_event_debug - debug for
- * device event
+ * _scsih_pcie_device_status_change_event_debug - debug for device event
+ * @ioc: ?
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -7262,8 +7186,6 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7314,8 +7236,6 @@ out:
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -7348,8 +7268,6 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7416,8 +7334,6 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7483,6 +7399,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_VOLUME)
continue;
+ /* skip PCIe devices */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_PCIE_DEVICE)
+ continue;
handle = sas_device_priv_data->sas_target->handle;
lun = sas_device_priv_data->lun;
@@ -7580,8 +7500,6 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7617,8 +7535,6 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7654,8 +7570,6 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7684,7 +7598,7 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle for physical disk
* @phys_disk_num: physical disk number
*
- * Return 0 for success, else failure.
+ * Return: 0 for success, else failure.
*/
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
@@ -7736,10 +7650,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2RaidActionRequest_t)/4);
rc = -EFAULT;
goto out;
}
@@ -7794,8 +7708,6 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
@@ -7852,8 +7764,6 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @handle: volume device handle
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -7887,8 +7797,6 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
@@ -7929,8 +7837,6 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
@@ -7980,8 +7886,6 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
@@ -7997,8 +7901,6 @@ _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
@@ -8050,8 +7952,6 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -8130,8 +8030,6 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8202,8 +8100,6 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8286,8 +8182,6 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8372,8 +8266,6 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -8414,8 +8306,6 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8473,8 +8363,6 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_sas_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
@@ -8569,8 +8457,6 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
* _scsih_create_enclosure_list_after_reset - Free Existing list,
* And create enclosure list by scanning all Enclosure Page(0)s
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -8617,8 +8503,6 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8661,8 +8545,6 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponding_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
@@ -8736,8 +8618,6 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8785,8 +8665,6 @@ out:
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_raid_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
@@ -8842,8 +8720,6 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8914,8 +8790,6 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_expanders.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
@@ -8968,8 +8842,6 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
@@ -9009,8 +8881,6 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_remove_unresponding_devices - removing unresponding devices
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -9136,8 +9006,6 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_scan_for_devices_after_reset - scan for devices after host reset
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -9421,60 +9289,68 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
}
+
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
- * @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+}
+
+/**
+ * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
*
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
- *
- * Return nothing.
+ * The handler for doing any required cleanup or initialization.
*/
void
-mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
- ioc->scsih_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
- complete(&ioc->scsih_cmds.done);
- }
- if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
- ioc->tm_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
- complete(&ioc->tm_cmds.done);
- }
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
- memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
- memset(ioc->device_remove_in_progress, 0,
- ioc->device_remove_in_progress_sz);
- _scsih_fw_event_cleanup_queue(ioc);
- _scsih_flush_running_cmds(ioc);
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+}
+
+/**
+ * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void
+mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
- !ioc->sas_hba.num_phys)) {
- _scsih_prep_device_scan(ioc);
- _scsih_create_enclosure_list_after_reset(ioc);
- _scsih_search_responding_sas_devices(ioc);
- _scsih_search_responding_pcie_devices(ioc);
- _scsih_search_responding_raid_devices(ioc);
- _scsih_search_responding_expanders(ioc);
- _scsih_error_recovery_delete_devices(ioc);
- }
- break;
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_create_enclosure_list_after_reset(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_pcie_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
}
}
@@ -9483,8 +9359,6 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -9519,7 +9393,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
ioc->start_scan = 0;
- if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -9577,13 +9451,10 @@ out:
/**
* _firmware_event_work
- * @ioc: per adapter object
* @work: The fw_event_work object
* Context: user.
*
* wrappers for the work thread handling firmware events
- *
- * Return nothing.
*/
static void
@@ -9605,8 +9476,8 @@ _firmware_event_work(struct work_struct *work)
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
@@ -9791,8 +9662,6 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
*
* Removing object and freeing associated memory from the
* ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
@@ -9841,8 +9710,6 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
*
* Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
* the host system is shutting down.
- *
- * Return nothing.
*/
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
@@ -9914,7 +9781,6 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
* @pdev: PCI device struct
*
* Routine called when unloading the driver.
- * Return nothing.
*/
static void scsih_remove(struct pci_dev *pdev)
{
@@ -9996,8 +9862,6 @@ static void scsih_remove(struct pci_dev *pdev)
/**
* scsih_shutdown - routine call during system shutdown
* @pdev: PCI device struct
- *
- * Return nothing.
*/
static void
scsih_shutdown(struct pci_dev *pdev)
@@ -10220,7 +10084,7 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
*
* Get the next pcie device from pcie_device_init_list list.
*
- * Returns pcie device structure if pcie_device_init_list list is not empty
+ * Return: pcie device structure if pcie_device_init_list list is not empty
* otherwise returns NULL
*/
static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
@@ -10390,7 +10254,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
if (time >= (300 * HZ)) {
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
pr_info(MPT3SAS_FMT
"port enable: FAILED with timeout (timeout=300s)\n",
ioc->name);
@@ -10412,7 +10276,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
if (ioc->wait_for_discovery_to_complete) {
ioc->wait_for_discovery_to_complete = 0;
@@ -10568,7 +10432,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
* @pdev: PCI device struct
* @id: pci device id
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -10818,7 +10682,7 @@ out_add_shost_fail:
* @pdev: PCI device struct
* @state: PM state change to (usually PCI_D3)
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -10845,7 +10709,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
* scsih_resume - power management resume main entry point
* @pdev: PCI device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_resume(struct pci_dev *pdev)
@@ -10881,8 +10745,7 @@ scsih_resume(struct pci_dev *pdev)
*
* Description: Called when a PCI error is detected.
*
- * Return value:
- * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
*/
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
@@ -11143,7 +11006,7 @@ static struct pci_driver mpt3sas_driver = {
/**
* scsih_init - main entry point for this driver.
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_init(void)
@@ -11193,7 +11056,7 @@ scsih_init(void)
/**
* scsih_exit - exit point for this driver (when it is a module).
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static void
scsih_exit(void)
@@ -11223,7 +11086,7 @@ scsih_exit(void)
/**
* _mpt3sas_init - main entry point for this driver.
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int __init
_mpt3sas_init(void)
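
The largest change in this file is the split of the single reset callback, which used to switch on a reset_phase argument, into three dedicated handlers. A minimal sketch of the calling order, assuming a simplified hard-reset path (hypothetical_host_reset() and do_hard_reset() below are hypothetical stand-ins for the real invocation path in mpt3sas_base.c):

    /* Hypothetical caller: the three phases are now explicit calls rather
     * than a switch (reset_phase) inside one callback.
     */
    static int hypothetical_host_reset(struct MPT3SAS_ADAPTER *ioc)
    {
    	int rc;

    	mpt3sas_scsih_pre_reset_handler(ioc);      /* before the chip is reset   */
    	rc = do_hard_reset(ioc);                   /* hypothetical chip reset    */
    	mpt3sas_scsih_after_reset_handler(ioc);    /* fail/flush pending cmds    */
    	if (!rc)
    		mpt3sas_scsih_reset_done_handler(ioc); /* rescan once firmware is back */
    	return rc;
    }
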
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 3a143bb5ca72..f8cc2677c1cd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -134,7 +134,7 @@ _transport_convert_phy_link_rate(u8 link_rate)
*
* Populates sas identify info.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -226,8 +226,8 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* Callback handler when sending internal generated transport cmds.
* The callback index passed is `ioc->transport_cb_idx`
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -287,7 +287,7 @@ struct rep_manu_reply {
*
* Fills in the sas_expander_device object when SMP port is created.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
@@ -460,8 +460,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
* _transport_delete_port - helper function to removing a port
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
- *
- * Returns nothing.
*/
static void
_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
@@ -489,8 +487,6 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
@@ -513,8 +509,6 @@ _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
@@ -538,8 +532,6 @@ _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
* @sas_address: sas address of device/expander were phy needs to be added to
- *
- * Returns nothing.
*/
static void
_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
@@ -563,7 +555,7 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
return;
}
_transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
- return;
+ return;
}
}
@@ -573,8 +565,6 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
@@ -635,7 +625,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
*
* Adding new port object to the sas_node->sas_port_list.
*
- * Returns mpt3sas_port.
+ * Return: mpt3sas_port.
*/
struct _sas_port *
mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -794,8 +784,6 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*
* Removing object and freeing associated memory from the
* ioc->sas_port_list.
- *
- * Return nothing.
*/
void
mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -860,7 +848,7 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
* @phy_pg0: sas phy page 0
* @parent_dev: parent device class object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
@@ -928,7 +916,7 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @expander_pg1: expander page 1
* @parent_dev: parent device class object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
@@ -995,10 +983,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @ioc: per adapter object
* @sas_address: sas address of parent expander or sas host
* @handle: attached device handle
- * @phy_numberv: phy number
+ * @phy_number: phy number
* @link_rate: new link rate
- *
- * Returns nothing.
*/
void
mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
@@ -1090,7 +1076,7 @@ struct phy_error_log_reply {
* @ioc: per adapter object
* @phy: The sas phy object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1262,7 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
* _transport_get_linkerrors - return phy counters for both hba and expanders
* @phy: The sas phy object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1311,10 +1297,11 @@ _transport_get_linkerrors(struct sas_phy *phy)
/**
* _transport_get_enclosure_identifier -
- * @phy: The sas phy object
+ * @rphy: The sas phy object
+ * @identifier: ?
*
* Obtain the enclosure logical id for an expander.
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
@@ -1342,9 +1329,9 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
/**
* _transport_get_bay_identifier -
- * @phy: The sas phy object
+ * @rphy: The sas phy object
*
- * Returns the slot id for a device that resides inside an enclosure.
+ * Return: the slot id for a device that resides inside an enclosure.
*/
static int
_transport_get_bay_identifier(struct sas_rphy *rphy)
@@ -1400,8 +1387,9 @@ struct phy_control_reply {
* _transport_expander_phy_control - expander phy control
* @ioc: per adapter object
* @phy: The sas phy object
+ * @phy_operation: ?
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1571,7 +1559,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
* @phy: The sas phy object
* @hard_reset:
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1623,7 +1611,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
* @enable: enable phy when true
*
* Only support sas_host direct attached phys.
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_enable(struct sas_phy *phy, int enable)
@@ -1761,7 +1749,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
* @rates: rates defined in sas_phy_linkrates
*
* Only support sas_host direct attached phys.
- * Returns 0 for success, non-zero for failure.
+ *
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
@@ -1904,9 +1893,9 @@ _transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
/**
* _transport_smp_handler - transport portal for smp passthru
+ * @job: ?
* @shost: shost object
* @rphy: sas transport rphy object
- * @req:
*
* This used primarily for smp_utils.
* Example:
@@ -1936,12 +1925,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
rc = -EFAULT;
- goto out;
+ goto job_done;
}
rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
if (rc)
- goto out;
+ goto job_done;
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
@@ -2066,6 +2055,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
mutex_unlock(&ioc->transport_cmds.mutex);
+job_done:
bsg_job_done(job, rc, reslen);
}
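
The job_done label above fixes the early-exit paths of the SMP passthru handler: when the host is resetting or the mutex was never acquired, the code previously jumped to out:, which unlocks a mutex it does not hold and clears transport_cmds.status. A stripped-down sketch of the resulting two-label cleanup (everything between the labels is elided; names follow the diff):

    	rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
    	if (rc)
    		goto job_done;          /* lock not held: skip the unlock */

    	/* ... build and issue the SMP request, set rc and reslen ... */

    out:
    	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
    	mutex_unlock(&ioc->transport_cmds.mutex);
    job_done:
    	bsg_job_done(job, rc, reslen);  /* always complete the bsg job */
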
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index b60fd7a3b571..cae7c1eaef34 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -62,7 +62,7 @@
/**
* _mpt3sas_raise_sigio - notifiy app
* @ioc: per adapter object
- * @event_data:
+ * @event_data: ?
*/
static void
_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
@@ -107,7 +107,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
/**
* mpt3sas_process_trigger_data - process the event data for the trigger
* @ioc: per adapter object
- * @event_data:
+ * @event_data: ?
*/
void
mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
@@ -209,8 +209,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
/**
* mpt3sas_trigger_event - Event trigger handler
* @ioc: per adapter object
- * @event:
- * @log_entry_qualifier:
+ * @event: ?
+ * @log_entry_qualifier: ?
*
*/
void
@@ -288,9 +288,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
/**
* mpt3sas_trigger_scsi - SCSI trigger handler
* @ioc: per adapter object
- * @sense_key:
- * @asc:
- * @ascq:
+ * @sense_key: ?
+ * @asc: ?
+ * @ascq: ?
*
*/
void
@@ -364,8 +364,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
/**
* mpt3sas_trigger_mpi - MPI trigger handler
* @ioc: per adapter object
- * @ioc_status:
- * @loginfo:
+ * @ioc_status: ?
+ * @loginfo: ?
*
*/
void
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 45aa94915cbf..b4927f2b7677 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -267,9 +267,6 @@ out_error:
* @scmd: pointer to scsi command object
* @raid_device: pointer to raid device data structure
* @mpi_request: pointer to the SCSI_IO reqest message frame
- * @smid: system request message index
- *
- * Returns nothing
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index dc4e801b2cef..6cd3e289ef99 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4611,7 +4611,7 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
* in order to keep it alive.
*/
if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
- cmd->result = ScsiResult(DID_RESET, 0);
+ cmd->result = DID_RESET << 16;
ncr_queue_done_cmd(np, cmd);
}
@@ -4957,7 +4957,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Check condition code
*/
- cmd->result = ScsiResult(DID_OK, S_CHECK_COND);
+ cmd->result = DID_OK << 16 | S_CHECK_COND;
/*
** Copy back sense data to caller's buffer.
@@ -4978,7 +4978,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Reservation Conflict condition code
*/
- cmd->result = ScsiResult(DID_OK, S_CONFLICT);
+ cmd->result = DID_OK << 16 | S_CONFLICT;
} else if ((cp->host_status == HS_COMPLETE)
&& (cp->scsi_status == S_BUSY ||
@@ -8043,7 +8043,7 @@ printk("ncr53c8xx_queue_command\n");
spin_lock_irqsave(&np->smp_lock, flags);
if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
- cmd->result = ScsiResult(sts, 0);
+ cmd->result = sts << 16;
#ifdef DEBUG_NCR53C8XX
printk("ncr53c8xx : command not queued - result=%d\n", sts);
#endif
@@ -8234,7 +8234,7 @@ static void process_waiting_list(struct ncb *np, int sts)
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
#endif
- wcmd->result = ScsiResult(sts, 0);
+ wcmd->result = sts << 16;
ncr_queue_done_cmd(np, wcmd);
}
}
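
The removed ScsiResult() wrapper packed the host byte and the SCSI status byte into cmd->result; the replacement open-codes the same layout the SCSI midlayer expects (host byte in bits 16-23, status byte in bits 0-7). For example:

    /* equivalent encodings of the result word */
    cmd->result = DID_RESET << 16;               /* transport error, no status        */
    cmd->result = (DID_OK << 16) | S_CHECK_COND; /* good transport, CHECK CONDITION   */
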
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
index 58806f432a16..4f1d4bf9c775 100644
--- a/drivers/scsi/nsp32_debug.c
+++ b/drivers/scsi/nsp32_debug.c
@@ -137,7 +137,7 @@ static void print_commandk (unsigned char *command)
printk("\n");
}
-static void show_command(Scsi_Cmnd *SCpnt)
+static void show_command(struct scsi_cmnd *SCpnt)
{
print_commandk(SCpnt->cmnd);
}
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index a269da1a6c75..387dc87e4d22 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -126,22 +126,24 @@ static void init_sqe(struct iscsi_task_params *task_params,
sgl_task_params,
dif_task_params);
- if (scsi_is_slow_sgl(sgl_task_params->num_sges,
- sgl_task_params->small_mid_sge))
- num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
- else
- num_sges = min(sgl_task_params->num_sges,
- (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
- }
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
- SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
- SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
- buf_size);
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+ buf_size);
- if (GET_FIELD(pdu_header->hdr_second_dword,
- ISCSI_CMD_HDR_TOTAL_AHS_LEN))
- SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
- cmd_params->extended_cdb_sge.sge_len);
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
}
break;
case ISCSI_TASK_TYPE_INITIATOR_READ:
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cff83b9457f7..aa96bccb5a96 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -524,7 +524,7 @@ static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+ id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
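
The kcalloc() change above sizes the ID bitmap in the units the bitmap helpers actually operate on. The old sizing allocated 32-bit words, which can come up short of a whole unsigned long on 64-bit kernels (the apparent motivation for the fix); the new sizing matches set_bit()/find_first_zero_bit():

    /* old: DIV_ROUND_UP(size, 32) * 4 bytes -- e.g. size = 16 gives 4 bytes,
     * while the bitmap helpers access a full 8-byte unsigned long on 64-bit.
     */
    id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);

    /* new: whole unsigned longs, the unit the bitmap API works in */
    id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
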
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index c8731568f9c4..4888b999e82f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -518,6 +518,9 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -570,7 +573,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
return 0;
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
return 0;
rval = qla2x00_read_sfp_dev(vha, buf, count);
@@ -733,6 +736,15 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
int type;
port_id_t did;
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return 0;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
type = simple_strtol(buf, NULL, 10);
did.b.domain = (type & 0x00ff0000) >> 16;
@@ -771,6 +783,12 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
return 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
if (ha->xgmac_data)
goto do_read;
@@ -825,6 +843,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (ha->dcbx_tlv)
goto do_read;
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
@@ -1036,7 +1057,7 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
vha->device_flags & DFLG_NO_CABLE)
len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
- qla2x00_reset_active(vha))
+ qla2x00_chip_is_down(vha))
len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -1163,7 +1184,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
@@ -1350,7 +1371,7 @@ qla2x00_thermal_temp_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
- if (qla2x00_reset_active(vha)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
}
@@ -1381,7 +1402,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
@@ -1840,7 +1861,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (unlikely(pci_channel_offline(ha->pdev)))
goto done;
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
goto done;
stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
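
The qla_attr.c hunks above all apply the same gate: sysfs handlers that touch adapter registers or firmware now bail out early while the PCI channel is offline or while qla2x00_chip_is_down() reports the chip is being reset, replacing the previous qla2x00_reset_active() checks. The recurring preamble, collected in one place:

    	if (unlikely(pci_channel_offline(ha->pdev)))
    		return 0;               /* PCI error recovery in progress */

    	if (qla2x00_chip_is_down(vha))
    		return 0;               /* chip reset/bring-up in progress */
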
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 5fd44c50bbac..c7533fa7f46e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1130,6 +1130,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ha->fw_dump);
goto qla24xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp24;
qla2xxx_prep_dump(ha, ha->fw_dump);
@@ -1384,6 +1385,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ha->fw_dump);
goto qla25xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
ha->fw_dump->version = htonl(2);
@@ -2036,6 +2038,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"request...\n", ha->fw_dump);
goto qla83xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 0f94b1d62d3f..a9dc9c4a6382 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -313,6 +313,7 @@ struct srb_cmd {
#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
+#define SRB_WAKEUP_ON_COMP BIT_6
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
@@ -379,6 +380,7 @@ struct srb_iocb {
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
#define SRB_LOGIN_NVME_PRLI BIT_3
+#define SRB_LOGIN_PRLI_ONLY BIT_4
uint16_t data[2];
u32 iop[2];
} logio;
@@ -398,6 +400,8 @@ struct srb_iocb {
struct completion comp;
struct els_plogi_payload *els_plogi_pyld;
struct els_plogi_payload *els_resp_pyld;
+ u32 tx_size;
+ u32 rx_size;
dma_addr_t els_plogi_pyld_dma;
dma_addr_t els_resp_pyld_dma;
uint32_t fw_status[3];
@@ -2312,6 +2316,7 @@ enum fcport_mgt_event {
FCME_ADISC_DONE,
FCME_GNNID_DONE,
FCME_GFPNID_DONE,
+ FCME_ELS_PLOGI_DONE,
};
enum rscn_addr_format {
@@ -2408,6 +2413,7 @@ typedef struct fc_port {
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
enum login_state fw_login_state;
+ unsigned long dm_login_expire;
unsigned long plogi_nack_done_deadline;
u32 login_gen, last_login_gen;
@@ -2418,7 +2424,8 @@ typedef struct fc_port {
u8 iocb[IOCB_SIZE];
u8 current_login_state;
u8 last_login_state;
- struct completion n2n_done;
+ u16 n2n_link_reset_cnt;
+ u16 n2n_chip_reset;
} fc_port_t;
#define QLA_FCPORT_SCAN 1
@@ -3228,6 +3235,7 @@ enum qla_work_type {
QLA_EVT_GFPNID,
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
+ QLA_EVT_ELS_PLOGI,
};
@@ -3599,6 +3607,8 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
uint32_t rida_fmt2:1;
+ uint32_t purge_mbox:1;
+ uint32_t n2n_bigger:1;
} flags;
uint16_t max_exchg;
@@ -3844,6 +3854,10 @@ struct qla_hw_data {
int port_down_retry_count;
uint8_t mbx_count;
uint8_t aen_mbx_count;
+ atomic_t num_pend_mbx_stage1;
+ atomic_t num_pend_mbx_stage2;
+ atomic_t num_pend_mbx_stage3;
+ uint16_t frame_payload_size;
uint32_t login_retry_count;
/* SNS command interfaces. */
@@ -3903,6 +3917,9 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
+ /* n2n */
+ struct els_plogi_payload plogi_els_payld;
+
void *swl;
/* These are used by mailbox operations. */
@@ -4157,6 +4174,7 @@ struct qla_hw_data {
struct work_struct board_disable;
struct mr_data_fx00 mr;
+ uint32_t chip_reset;
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
@@ -4238,7 +4256,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
-#define FREE_BIT 21
+#define N2N_LINK_RESET 21
#define PORT_UPDATE_NEEDED 22
#define FX00_RESET_RECOVERY 23
#define FX00_TARGET_SCAN 24
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 5d8688e5bc7c..50c1e6c62e31 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1366,6 +1366,11 @@ struct vp_rpt_id_entry_24xx {
/* format 1 fabric */
uint8_t vpstat1_subcode; /* vp_status=1 subcode */
uint8_t flags;
+#define TOPO_MASK 0xE
+#define TOPO_FL 0x2
+#define TOPO_N2N 0x4
+#define TOPO_F 0x6
+
uint16_t fip_flags;
uint8_t rsv2[12];
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2660a48d918a..178974896b5c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,8 +45,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *,
- port_id_t);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
@@ -118,6 +117,7 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
+int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
/*
* Global Data in qla_os.c source file.
*/
@@ -212,7 +212,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_async_abort_cmd(srb_t *, bool);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 7a3744006419..a0038d879b9d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1962,7 +1962,6 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
void *entries;
struct ct_fdmiv2_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
struct new_utsname *p_sysid = NULL;
/* Issue RHBA */
@@ -2142,9 +2141,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
-	eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
-	    le16_to_cpu(icb24->frame_payload_size) :
-	    le16_to_cpu(ha->init_cb->frame_payload_size);
-	eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
+	eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
eiter->len = cpu_to_be16(4 + 4);
size += 4 + 4;
@@ -3394,19 +3391,40 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ struct srb_iocb *c = &sp->u.iocb_cmd;
+
+ switch (sp->type) {
+ case SRB_ELS_DCMD:
+ if (c->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ c->u.els_plogi.tx_size,
+ c->u.els_plogi.els_plogi_pyld,
+ c->u.els_plogi.els_plogi_pyld_dma);
+
+ if (c->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ c->u.els_plogi.rx_size,
+ c->u.els_plogi.els_resp_pyld,
+ c->u.els_plogi.els_resp_pyld_dma);
+ break;
+ case SRB_CT_PTHRU_CMD:
+ default:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+ break;
}
sp->free(sp);
@@ -3483,6 +3501,14 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->rscn_gen++;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
+ if (fcport->login_retry == 0) {
+ fcport->login_retry =
+ vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
switch (fcport->disc_state) {
case DSC_LOGIN_COMPLETE:
/* recheck session is still intact. */
@@ -3981,6 +4007,14 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
} else {
if (fcport->rscn_rcvd ||
fcport->disc_state != DSC_LOGIN_COMPLETE) {
+ if (fcport->login_retry == 0) {
+ fcport->login_retry =
+ vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
fcport->rscn_rcvd = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
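
qla24xx_sp_unmap() now frees buffers according to the srb type: ELS PLOGI srbs carry coherent DMA payloads whose sizes must be handed back to dma_free_coherent() exactly as allocated, which is why the qla_def.h hunk earlier adds tx_size/rx_size to struct srb_iocb. A minimal sketch of the alloc/free pairing (field names follow the diff; elsio is assumed to point at the srb's struct srb_iocb):

    	elsio->u.els_plogi.els_plogi_pyld =
    	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
    			       &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);

    	/* ... issue the ELS PLOGI ... */

    	/* must use the same size and handles recorded at allocation time */
    	dma_free_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
    			  elsio->u.els_plogi.els_plogi_pyld,
    			  elsio->u.els_plogi.els_plogi_pyld_dma);
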
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1b19b954bbae..b934977c5c26 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -50,16 +50,15 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
- scsi_qla_host_t *vha = sp->vha;
struct req_que *req;
unsigned long flags;
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- req = vha->hw->req_q_map[0];
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ req = sp->qpair->req;
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
iocb->timeout(sp);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
void
@@ -100,6 +99,8 @@ qla2x00_async_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
+ int rc, h;
+ unsigned long flags;
if (fcport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
@@ -114,11 +115,26 @@ qla2x00_async_iocb_timeout(void *data)
switch (sp->type) {
case SRB_LOGIN_CMD:
- /* Retry as needed. */
- lio->u.logio.data[0] = MBS_COMMAND_ERROR;
- lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED : 0;
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ /* Retry as needed. */
+ lio->u.logio.data[0] = MBS_COMMAND_ERROR;
+ lio->u.logio.data[1] =
+ lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
+ h++) {
+ if (sp->qpair->req->outstanding_cmds[h] ==
+ sp) {
+ sp->qpair->req->outstanding_cmds[h] =
+ NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
break;
case SRB_LOGOUT_CMD:
case SRB_CT_PTHRU_CMD:
@@ -127,7 +143,21 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
case SRB_CTRL_VP:
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
+ h++) {
+ if (sp->qpair->req->outstanding_cmds[h] ==
+ sp) {
+ sp->qpair->req->outstanding_cmds[h] =
+ NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
break;
}
}
@@ -160,6 +190,22 @@ qla2x00_async_login_sp_done(void *ptr, int res)
sp->free(sp);
}
+static inline bool
+fcport_is_smaller(fc_port_t *fcport)
+{
+ if (wwn_to_u64(fcport->port_name) <
+ wwn_to_u64(fcport->vha->port_name))
+ return true;
+ else
+ return false;
+}
+
+static inline bool
+fcport_is_bigger(fc_port_t *fcport)
+{
+ return !fcport_is_smaller(fcport);
+}
+
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
@@ -189,13 +235,16 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_login_sp_done;
- lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
+ lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
+ } else {
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
- lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+
+ }
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
@@ -370,6 +419,19 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__qla24xx_handle_gpdb_event(vha, ea);
}
+int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
+ return qla2x00_post_work(vha, e);
+}
+
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
@@ -382,7 +444,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
"Async done-%s res %x %8phC\n",
sp->name, res, sp->fcport->port_name);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.event = FCME_ADISC_DONE;
@@ -418,6 +480,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_adisc_sp_done;
@@ -464,7 +528,6 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
- fcport->login_retry = vha->hw->login_retry_count;
ql_dbg(ql_dbg_disc, vha, 0x20de,
"GNL failed Port login retry %8phN, retry cnt=%d.\n",
fcport->port_name, fcport->login_retry);
@@ -497,35 +560,51 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
for (i = 0; i < n; i++) {
e = &vha->gnl.l[i];
wwn = wwn_to_u64(e->port_name);
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
continue;
+ if (IS_SW_RESV_ADDR(id))
+ continue;
+
found = 1;
- id.b.domain = e->port_id[2];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[0];
- id.b.rsvd_1 = 0;
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
+ if (fcport->fc4f_nvme)
+ current_login_state = e->current_login_state >> 4;
+ else
+ current_login_state = e->current_login_state & 0xf;
+
ql_dbg(ql_dbg_disc, vha, 0x20e2,
- "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
__func__, fcport->port_name,
e->current_login_state, fcport->fw_login_state,
- id.b.domain, id.b.area, id.b.al_pa,
+ fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
- if ((id.b24 != fcport->d_id.b24) ||
- ((fcport->loop_id != FC_NO_LOOP_ID) &&
- (fcport->loop_id != loop_id))) {
- ql_dbg(ql_dbg_disc, vha, 0x20e3,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- return;
+ switch (fcport->disc_state) {
+ case DSC_DELETE_PEND:
+ case DSC_DELETED:
+ break;
+ default:
+ if ((id.b24 != fcport->d_id.b24 &&
+ fcport->d_id.b24) ||
+ (fcport->loop_id != FC_NO_LOOP_ID &&
+ fcport->loop_id != loop_id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ break;
}
fcport->loop_id = loop_id;
@@ -544,68 +623,148 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport->login_pause = 1;
}
- if (fcport->fc4f_nvme)
- current_login_state = e->current_login_state >> 4;
- else
- current_login_state = e->current_login_state & 0xf;
-
- switch (current_login_state) {
- case DSC_LS_PRLI_COMP:
- ql_dbg(ql_dbg_disc, vha, 0x20e4,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
+ switch (vha->hw->current_topology) {
+ default:
+ switch (current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0x20e4, "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
- if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_LS_PORT_UNAVAIL:
+ default:
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(vha, fcport, data);
- break;
- case DSC_LS_PORT_UNAVAIL:
- default:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
+ fcport->loop_id = loop_id;
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
}
- ql_dbg(ql_dbg_disc, vha, 0x20e5,
- "%s %d %8phC\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
break;
- }
+ case ISP_CFG_N:
+ fcport->fw_login_state = current_login_state;
+ fcport->d_id = id;
+ switch (current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_LS_PLOGI_COMP:
+ if (fcport_is_bigger(fcport)) {
+ /* local adapter is smaller */
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
+
+ fcport->loop_id = loop_id;
+ qla24xx_fcport_handle_login(vha,
+ fcport);
+ break;
+ }
+ /* drop through */
+ default:
+ if (fcport_is_smaller(fcport)) {
+ /* local adapter is bigger */
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
+
+ fcport->loop_id = loop_id;
+ qla24xx_fcport_handle_login(vha,
+ fcport);
+ }
+ break;
+ }
+ break;
+ } /* switch (ha->current_topology) */
}
if (!found) {
- /* fw has no record of this port */
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
- if (conflict_fcport) {
- qlt_schedule_sess_for_deletion
- (conflict_fcport);
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_F:
+ case ISP_CFG_FL:
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ if (conflict_fcport) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0x20e5,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
+ }
}
+ /*
+ * FW already picked this loop id for
+ * another fcport
+ */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
-
- /* FW already picked this loop id for another fcport */
- if (fcport->loop_id == loop_id)
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case ISP_CFG_N:
+ fcport->disc_state = DSC_DELETED;
+ if (time_after_eq(jiffies, fcport->dm_login_expire)) {
+ if (fcport->n2n_link_reset_cnt < 2) {
+ fcport->n2n_link_reset_cnt++;
+ /*
+ * Remote port is not sending PLOGI.
+ * Reset the link to kick-start its
+ * state machine.
+ */
+ set_bit(N2N_LINK_RESET,
+ &vha->dpc_flags);
+ } else {
+ if (fcport->n2n_chip_reset < 1) {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Chip reset to bring laser down");
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ fcport->n2n_chip_reset++;
+ } else {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Remote port %8ph is not coming back\n",
+ fcport->port_name);
+ fcport->scan_state = 0;
+ }
+ }
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /*
+ * Remote port is supposed to do PLOGI. Give it
+ * more time. FW will catch it.
+ */
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ break;
+ default:
+ break;
}
- qla24xx_fcport_handle_login(vha, fcport);
}
} /* gnl_event */
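For illustration only, the escalation policy in the ISP_CFG_N branch above (no firmware record of the peer) can be sketched in isolation: wait until the login deadline, then try a link reset up to twice, then a single chip reset, then give up. This is a minimal stand-alone sketch; the enum, struct and function names are illustrative and are not driver code.

#include <stdbool.h>

enum n2n_action { N2N_WAIT, N2N_LINK_RESET, N2N_CHIP_RESET, N2N_GIVE_UP };

struct n2n_recovery {
	int link_reset_cnt;
	int chip_reset_cnt;
};

static enum n2n_action n2n_next_action(struct n2n_recovery *r, bool deadline_expired)
{
	if (!deadline_expired)
		return N2N_WAIT;		/* give the remote port more time */
	if (r->link_reset_cnt < 2) {
		r->link_reset_cnt++;
		return N2N_LINK_RESET;		/* kick the peer's login state machine */
	}
	if (r->chip_reset_cnt < 1) {
		r->chip_reset_cnt++;
		return N2N_CHIP_RESET;		/* bring the link/laser down entirely */
	}
	return N2N_GIVE_UP;			/* peer is not coming back */
}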
@@ -911,9 +1070,9 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
}
ql_dbg(ql_dbg_disc, vha, 0x211b,
- "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
- fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b24, fcport->login_retry);
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+ fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
+ fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
return rval;
@@ -1055,8 +1214,9 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
- fcport->disc_state, pd->current_login_state, ea->rc);
+ "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
+ ea->rc);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
@@ -1074,9 +1234,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
case PDS_PLOGI_COMPLETE:
case PDS_PRLI_PENDING:
case PDS_PRLI2_PENDING:
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
- __func__, __LINE__, fcport->port_name);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ /* Set discovery state back to GNL for the relogin attempt */
+ if (qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) {
+ fcport->disc_state = DSC_GNL;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
return;
case PDS_LOGO_PENDING:
case PDS_PORT_UNAVAILABLE:
@@ -1174,39 +1337,80 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
- if (fcport->login_retry > 0)
- fcport->login_retry--;
-
switch (fcport->disc_state) {
case DSC_DELETED:
wwn = wwn_to_u64(fcport->node_name);
- if (wwn == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post GNNID\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnnid_work(vha, fcport);
- } else if (fcport->loop_id == FC_NO_LOOP_ID) {
- ql_dbg(ql_dbg_disc, vha, 0x20bd,
- "%s %d %8phC post gnl\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnl_work(vha, fcport);
- } else {
- qla_chk_n2n_b4_login(vha, fcport);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ if (fcport_is_smaller(fcport)) {
+ /* this adapter is bigger */
+ if (fcport->login_retry) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha,
+ fcport);
+ fcport->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ }
+ fcport->login_retry--;
+ qla_post_els_plogi_work(vha, fcport);
+ } else {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Unable to reach remote port %8phC",
+ fcport->port_name);
+ }
+ } else {
+ qla24xx_post_gnl_work(vha, fcport);
+ }
+ break;
+ default:
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_disc, vha, 0x20bd,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnl_work(vha, fcport);
+ } else {
+ qla_chk_n2n_b4_login(vha, fcport);
+ }
+ break;
}
break;
case DSC_GNL:
- if (fcport->login_pause) {
- fcport->last_rscn_gen = fcport->rscn_gen;
- fcport->last_login_gen = fcport->login_gen;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ if ((fcport->current_login_state & 0xf) == 0x6) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post GPDB work\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->chip_reset =
+ vha->hw->base_qpair->chip_reset;
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post NVMe PRLI\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+ break;
+ default:
+ if (fcport->login_pause) {
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->last_login_gen = fcport->login_gen;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ qla_chk_n2n_b4_login(vha, fcport);
break;
}
-
- qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
+ fcport->login_retry--;
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
@@ -1221,6 +1425,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20d1,
"%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
+ fcport->login_retry--;
data[0] = data[1] = 0;
qla2x00_post_async_adisc_work(vha, fcport, data);
break;
@@ -1304,17 +1509,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
}
}
- if (fcport->flags & FCF_ASYNC_SENT) {
- fcport->login_retry++;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- return;
- }
-
- if (fcport->disc_state == DSC_DELETE_PEND) {
- fcport->login_retry++;
- return;
- }
-
if (fcport->last_rscn_gen != fcport->rscn_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
@@ -1326,6 +1520,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
}
+
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post PRLI\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+}
+
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *f, *tf;
@@ -1427,6 +1630,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFPNID_DONE:
qla24xx_handle_gfpnid_event(vha, ea);
break;
+ case FCME_ELS_PLOGI_DONE:
+ qla_handle_els_plogi_done(vha, ea);
+ break;
default:
BUG_ON(1);
break;
@@ -1520,7 +1726,7 @@ qla24xx_abort_iocb_timeout(void *data)
struct srb_iocb *abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = CS_TIMEOUT;
- complete(&abt->u.abt.comp);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
static void
@@ -1529,12 +1735,16 @@ qla24xx_abort_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
- if (del_timer(&sp->u.iocb_cmd.timer))
- complete(&abt->u.abt.comp);
+ if (del_timer(&sp->u.iocb_cmd.timer)) {
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&abt->u.abt.comp);
+ else
+ sp->free(sp);
+ }
}
int
-qla24xx_async_abort_cmd(srb_t *cmd_sp)
+qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
fc_port_t *fcport = cmd_sp->fcport;
@@ -1549,6 +1759,8 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1572,10 +1784,11 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
"Abort command issued - hdl=%x, target_id=%x\n",
cmd_sp->handle, fcport->tgt_id);
- wait_for_completion(&abt_iocb->u.abt.comp);
-
- rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ if (wait) {
+ wait_for_completion(&abt_iocb->u.abt.comp);
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ }
done_free_sp:
sp->free(sp);
@@ -1611,7 +1824,7 @@ qla24xx_async_abort_command(srb_t *sp)
return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
FXDISC_ABORT_IOCTL);
- return qla24xx_async_abort_cmd(sp);
+ return qla24xx_async_abort_cmd(sp, true);
}
static void
@@ -1799,7 +2012,6 @@ void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
qlt_logo_completion_handler(fcport, data[0]);
fcport->login_gen++;
fcport->flags &= ~FCF_ASYNC_ACTIVE;
@@ -4050,7 +4262,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_update_host_map(vha, id);
+ if (!(topo == 2 && ha->flags.n2n_bigger))
+ qlt_update_host_map(vha, id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
@@ -4308,7 +4521,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
while (cnt--)
*dptr1++ = *dptr2++;
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/* Use alternate WWN? */
if (nv->host_p[1] & BIT_7) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
@@ -4591,20 +4804,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
- if (ha->flags.rida_fmt2) {
- /* With Rida Format 2, the login is already triggered.
- * We know who is on the other side of the wire.
- * No need to login to do login to find out or drop into
- * qla2x00_configure_local_loop().
- */
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
clear_bit(LOCAL_LOOP_UPDATE, &flags);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
- if (qla_tgt_mode_enabled(vha)) {
- /* allow the other side to start the login */
- clear_bit(LOCAL_LOOP_UPDATE, &flags);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- }
}
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
@@ -4688,110 +4891,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
}
/*
- * N2N Login
- * Updates Fibre Channel Device Database with local loop devices.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Returns:
- */
-static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
- fc_port_t *fcport)
-{
- struct qla_hw_data *ha = vha->hw;
- int res = QLA_SUCCESS, rval;
- int greater_wwpn = 0;
- int logged_in = 0;
-
- if (ha->current_topology != ISP_CFG_N)
- return res;
-
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(vha->n2n_port_name)) {
- ql_dbg(ql_dbg_disc, vha, 0x2002,
- "HBA WWPN is greater %llx > target %llx\n",
- wwn_to_u64(vha->port_name),
- wwn_to_u64(vha->n2n_port_name));
- greater_wwpn = 1;
- fcport->d_id.b24 = vha->n2n_id;
- }
-
- fcport->loop_id = vha->loop_id;
- fcport->fc4f_nvme = 0;
- fcport->query = 1;
-
- ql_dbg(ql_dbg_disc, vha, 0x4001,
- "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
- fcport->d_id.b24, vha->loop_id);
-
- /* Fill in member data. */
- if (!greater_wwpn) {
- rval = qla2x00_get_port_database(vha, fcport, 0);
- ql_dbg(ql_dbg_disc, vha, 0x1051,
- "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
- fcport->current_login_state, fcport->last_login_state,
- fcport->d_id.b24, fcport->loop_id, rval);
-
- if (((fcport->current_login_state & 0xf) == 0x4) ||
- ((fcport->current_login_state & 0xf) == 0x6))
- logged_in = 1;
- }
-
- if (logged_in || greater_wwpn) {
- if (!vha->nvme_local_port && vha->flags.nvme_enabled)
- qla_nvme_register_hba(vha);
-
- /* Set connected N_Port d_id */
- if (vha->flags.nvme_enabled)
- fcport->fc4f_nvme = 1;
-
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- fcport->disc_state = DSC_GNL;
- fcport->n2n_flag = 1;
- fcport->flags = 3;
- vha->hw->flags.gpsc_supported = 0;
-
- if (greater_wwpn) {
- ql_dbg(ql_dbg_disc, vha, 0x20e5,
- "%s %d PLOGI ELS %8phC\n",
- __func__, __LINE__, fcport->port_name);
-
- res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
- fcport, fcport->d_id);
- }
-
- if (res != QLA_SUCCESS) {
- ql_log(ql_log_info, vha, 0xd04d,
- "PLOGI Failed: portid=%06x - retrying\n",
- fcport->d_id.b24);
- res = QLA_SUCCESS;
- } else {
- /* State 0x6 means FCP PRLI complete */
- if ((fcport->current_login_state & 0xf) == 0x6) {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post GPDB work\n",
- __func__, __LINE__, fcport->port_name);
- fcport->chip_reset =
- vha->hw->base_qpair->chip_reset;
- qla24xx_post_gpdb_work(vha, fcport, 0);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post NVMe PRLI\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_prli_work(vha, fcport);
- }
- }
- } else {
- /* Wait for next database change */
- set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- }
-
- return res;
-}
-
-/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
*
@@ -4817,6 +4916,31 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ /* Initiate N2N login. */
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ /* borrow init_cb as scratch space for the login template */
+ u32 *bp, i, sz;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct els_plogi_payload),
+ ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ (void *)ha->init_cb, sz);
+ if (rval == QLA_SUCCESS) {
+ bp = (uint32_t *)ha->init_cb;
+ for (i = 0; i < sz/4 ; i++, bp++)
+ *bp = cpu_to_be32(*bp);
+
+ memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
+ sizeof(ha->plogi_els_payld.data));
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ }
+ return QLA_SUCCESS;
+ }
+
found_devs = 0;
new_fcport = NULL;
entries = MAX_FIBRE_DEVICES_LOOP;
@@ -4848,14 +4972,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /* Inititae N2N login. */
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- rval = qla24xx_n2n_handle_login(vha, new_fcport);
- if (rval != QLA_SUCCESS)
- goto cleanup_allocation;
- return QLA_SUCCESS;
- }
-
/* Add devices to port list. */
id_iter = (char *)ha->gid_list;
for (index = 0; index < entries; index++) {
@@ -5054,6 +5170,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport *rport;
unsigned long flags;
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ return;
+
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -5109,25 +5228,28 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
- ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
- __func__, fcport->port_name);
-
- if (IS_QLAFX00(vha->hw)) {
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- } else {
- fcport->login_retry = 0;
- fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- fcport->deleted = 0;
- fcport->logout_on_delete = 1;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- }
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ case ISP_CFG_NL:
+ fcport->keep_nport_handle = 1;
+ break;
+ default:
+ break;
+ }
+
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
return;
}
@@ -5168,6 +5290,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla24xx_post_gpsc_work(vha, fcport);
}
}
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
}
/*
@@ -5668,6 +5791,34 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
}
+/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
+int
+qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
+{
+ int loop_id = FC_NO_LOOP_ID;
+ int lid = NPH_MGMT_SERVER - vha->vp_idx;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == 0) {
+ set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
+ return NPH_MGMT_SERVER;
+ }
+
+ /* pick id from high and work down to low */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ for (; lid > 0; lid--) {
+ if (!test_bit(lid, vha->hw->loop_id_map)) {
+ set_bit(lid, vha->hw->loop_id_map);
+ loop_id = lid;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return loop_id;
+}
+
/*
* qla2x00_fabric_login
* Issue fabric login command.
@@ -6335,6 +6486,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x00af,
"Performing ISP error recovery - ha=%p.\n", ha);
+ ha->flags.purge_mbox = 1;
/* For ISP82XX, reset_chip is just disabling interrupts.
* Driver waits for the completion of the commands.
* the interrupts need to be enabled.
@@ -6349,13 +6501,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->current_topology = 0;
ha->flags.fw_started = 0;
ha->flags.fw_init_done = 0;
- ha->base_qpair->chip_reset++;
+ ha->chip_reset++;
+ ha->base_qpair->chip_reset = ha->chip_reset;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i])
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
}
+ /* purge MBox commands */
+ if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+
+ i = 0;
+ while (atomic_read(&ha->num_pend_mbx_stage3) ||
+ atomic_read(&ha->num_pend_mbx_stage2) ||
+ atomic_read(&ha->num_pend_mbx_stage1)) {
+ msleep(20);
+ i++;
+ if (i > 50)
+ break;
+ }
+ ha->flags.purge_mbox = 0;
+
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -6861,7 +7031,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
(uint8_t *)&icb->interrupt_delay_timer;
while (cnt--)
*dptr1++ = *dptr2++;
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/*
* Setup driver NVRAM options.
*/
@@ -6960,6 +7130,9 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (ql2xloginretrycount)
ha->login_retry_count = ql2xloginretrycount;
+ /* N2N: driver will initiate Login instead of FW */
+ icb->firmware_options_3 |= BIT_8;
+
/* Enable ZIO. */
if (!vha->flags.init_done) {
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
@@ -7069,7 +7242,7 @@ check_valid_image:
ha->active_image = QLA27XX_SECONDARY_IMAGE;
}
- ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
ha->active_image == 0 ? "default bootld and fw" :
ha->active_image == 1 ? "primary" :
ha->active_image == 2 ? "secondary" :
@@ -7917,7 +8090,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Use extended-initialization control block. */
memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/*
* Setup driver NVRAM options.
*/
@@ -8042,8 +8215,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
/* enable RIDA Format2 */
- if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
- icb->firmware_options_3 |= BIT_0;
+ icb->firmware_options_3 |= BIT_0;
+
+ /* N2N: driver will initiate Login instead of FW */
+ icb->firmware_options_3 |= BIT_8;
if (IS_QLA27XX(ha)) {
icb->firmware_options_3 |= BIT_8;
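For reference, the WWPN comparison behind the fcport_is_smaller()/fcport_is_bigger() helpers added earlier in this file (and the n2n_bigger decision) can be sketched outside the driver as below. This is a minimal, self-contained sketch: wwpn_to_u64() and local_port_initiates_login() are plain-C stand-ins for the kernel's wwn_to_u64() and the driver logic, not actual driver code.

#include <stdint.h>
#include <stdbool.h>

static uint64_t wwpn_to_u64(const uint8_t wwpn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwpn[i];	/* WWPNs compare as big-endian 64-bit values */
	return v;
}

/* In N2N topology the port with the bigger WWPN drives the PLOGI;
 * the smaller port waits for the peer to log in. */
static bool local_port_initiates_login(const uint8_t local[8], const uint8_t remote[8])
{
	return wwpn_to_u64(local) > wwpn_to_u64(remote);
}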
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 59fd5a9dfeb8..4351736b2426 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -58,14 +58,12 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
- unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
- local_irq_save(flags);
+
if (IS_P3P_TYPE(ha))
qla82xx_poll(0, rsp);
else
ha->isp_ops->intr_handler(0, rsp);
- local_irq_restore(flags);
}
static inline uint8_t *
@@ -204,6 +202,12 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
+static inline int
+qla2x00_chip_is_down(scsi_qla_host_t *vha)
+{
+ return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
+}
+
static inline srb_t *
qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
{
@@ -278,8 +282,6 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
- if (sp->type == SRB_ELS_DCMD)
- init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
add_timer(&sp->u.iocb_cmd.timer);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dd93a22fe843..42ac8e097419 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2241,12 +2241,15 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
- logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
-
- if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
- logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
- if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
- logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
+ } else {
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
+ logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
+ logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ }
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2463,6 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
sp->done = qla2x00_els_dcmd_sp_done;
sp->free = qla2x00_els_dcmd_sp_free;
@@ -2510,7 +2514,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
scsi_qla_host_t *vha = sp->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- uint32_t dsd_len = 24;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2533,20 +2536,21 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->control_flags = 0;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
- els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->tx_byte_count = els_iocb->tx_len =
+ sizeof(struct els_plogi_payload);
els_iocb->tx_address[0] =
cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
els_iocb->tx_address[1] =
cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
- els_iocb->tx_len = dsd_len;
els_iocb->rx_dsd_count = 1;
- els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->rx_byte_count = els_iocb->rx_len =
+ sizeof(struct els_plogi_payload);
els_iocb->rx_address[0] =
cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
els_iocb->rx_address[1] =
cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
- els_iocb->rx_len = dsd_len;
+
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
ql_dump_buffer(ql_log_info, vha, 0x0109,
@@ -2569,33 +2573,12 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
}
static void
-qla2x00_els_dcmd2_sp_free(void *data)
-{
- srb_t *sp = data;
- struct srb_iocb *elsio = &sp->u.iocb_cmd;
-
- if (elsio->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
- elsio->u.els_plogi.els_plogi_pyld,
- elsio->u.els_plogi.els_plogi_pyld_dma);
-
- if (elsio->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
- elsio->u.els_plogi.els_resp_pyld,
- elsio->u.els_plogi.els_resp_pyld_dma);
-
- del_timer(&elsio->timer);
- qla2x00_rel_sp(sp);
-}
-
-static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
unsigned long flags = 0;
int res;
@@ -2611,7 +2594,7 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
(res == QLA_SUCCESS) ? "successful" : "failed");
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- complete(&lio->u.els_plogi.comp);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
static void
@@ -2621,17 +2604,55 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
+ struct qla_work_evt *e;
+
+ ql_dbg(ql_dbg_disc, vha, 0x3072,
+ "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
+ sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
- ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8phC\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
+ del_timer(&sp->u.iocb_cmd.timer);
- complete(&lio->u.els_plogi.comp);
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&lio->u.els_plogi.comp);
+ else {
+ if (res) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.rc = res;
+ ea.event = FCME_ELS_PLOGI_DONE;
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+ if (!e) {
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.tx_size,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.rx_size,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+ sp->free(sp);
+ return;
+ }
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ }
}
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
- fc_port_t *fcport, port_id_t remote_did)
+ fc_port_t *fcport, bool wait)
{
srb_t *sp;
struct srb_iocb *elsio = NULL;
@@ -2649,23 +2670,23 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
elsio = &sp->u.iocb_cmd;
- fcport->d_id.b.domain = remote_did.b.domain;
- fcport->d_id.b.area = remote_did.b.area;
- fcport->d_id.b.al_pa = remote_did.b.al_pa;
-
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
init_completion(&elsio->u.els_plogi.comp);
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
+
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
sp->done = qla2x00_els_dcmd2_sp_done;
- sp->free = qla2x00_els_dcmd2_sp_free;
+ elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
@@ -2690,33 +2711,52 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
memset(ptr, 0, sizeof(struct els_plogi_payload));
memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
+ memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
+ &ha->plogi_els_payld.data,
+ sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
- qla24xx_get_port_login_templ(vha, ptr_dma + 4,
- &elsio->u.els_plogi.els_plogi_pyld->data[0],
- sizeof(struct els_plogi_payload));
- ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
+ sp->name, sp->handle, fcport->loop_id,
+ fcport->d_id.b24, vha->d_id.b24);
}
- ql_dbg(ql_dbg_io, vha, 0x3074,
- "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
- sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
-
- wait_for_completion(&elsio->u.els_plogi.comp);
+ if (wait) {
+ wait_for_completion(&elsio->u.els_plogi.comp);
- if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
- rval = QLA_FUNCTION_FAILED;
+ if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ goto done;
+ }
out:
+ fcport->flags &= ~(FCF_ASYNC_SENT);
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.tx_size,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.rx_size,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+
sp->free(sp);
+done:
return rval;
}
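As a side note on the PLOGI payload handling above: the login template words are converted once when read (the le32_to_cpu()/cpu_to_be32() loops in qla24xx_get_port_login_templ() and qla2x00_configure_local_loop()) and then copied verbatim into each ELS IOCB. A minimal stand-alone sketch of that word swap follows; swap32() is a plain-C stand-in and matches cpu_to_be32() only on a little-endian host.

#include <stdint.h>
#include <stddef.h>

static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

/* Swap each 32-bit word of the template so the payload carries the
 * byte order expected on the wire (little-endian host assumed). */
static void plogi_payload_to_wire(uint32_t *words, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		words[i] = swap32(words[i]);
}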
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7756106d4555..36cbb29c84f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -911,7 +911,8 @@ skip_rio:
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ if (!N2N_TOPO(ha))
+ qla2x00_mark_all_devices_lost(vha, 1);
}
if (vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index f0ec13d48bf3..2c6c2cd5a0d0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -59,6 +59,7 @@ static struct rom_cmd {
{ MBC_IOCB_COMMAND_A64 },
{ MBC_GET_ADAPTER_LOOP_ID },
{ MBC_READ_SFP },
+ { MBC_GET_RNID_PARAMS },
};
static int is_rom_cmd(uint16_t cmd)
@@ -110,6 +111,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ u32 chip_reset;
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
@@ -140,7 +142,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_SUCCESS;
abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-
+ chip_reset = ha->chip_reset;
if (ha->flags.pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x1003,
@@ -167,6 +169,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ atomic_inc(&ha->num_pend_mbx_stage1);
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -177,8 +180,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_log(ql_log_warn, vha, 0xd035,
"Cmd access timeout, cmd=0x%x, Exiting.\n",
mcp->mb[0]);
+ atomic_dec(&ha->num_pend_mbx_stage1);
return QLA_FUNCTION_TIMEOUT;
}
+ atomic_dec(&ha->num_pend_mbx_stage1);
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
ha->flags.mbox_busy = 1;
/* Save mailbox command for debug */
@@ -189,6 +198,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ rval = QLA_ABORTED;
+ ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ goto premature_exit;
+ }
+
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
@@ -231,7 +247,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"jiffies=%lx.\n", jiffies);
/* Wait for mbx cmd completion until timeout */
-
+ atomic_inc(&ha->num_pend_mbx_stage2);
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
@@ -241,6 +257,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -254,6 +271,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
+ atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -261,7 +279,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ } else if (ha->flags.purge_mbox ||
+ chip_reset != ha->chip_reset) {
+ ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ atomic_dec(&ha->num_pend_mbx_stage3);
+ rval = QLA_ABORTED;
+ goto premature_exit;
}
+ atomic_dec(&ha->num_pend_mbx_stage3);
+
if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
command, jiffies_to_msecs(jiffies - wait_time));
@@ -275,6 +303,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -289,6 +318,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
while (!ha->flags.mbox_int) {
+ if (ha->flags.purge_mbox ||
+ chip_reset != ha->chip_reset) {
+ ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
+
if (time_after(jiffies, wait_time))
break;
@@ -312,6 +349,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Waited %d sec.\n",
(uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
+ atomic_dec(&ha->num_pend_mbx_stage2);
/* Check whether we timed out */
if (ha->flags.mbox_int) {
@@ -390,7 +428,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Capture FW dump only, if PCI device active */
if (!pci_channel_offline(vha->hw->pdev)) {
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- if (w == 0xffff || ictrl == 0xffffffff) {
+ if (w == 0xffff || ictrl == 0xffffffff ||
+ (chip_reset != ha->chip_reset)) {
/* This is special case if there is unload
* of driver happening and if PCI device go
* into bad state due to PCI error condition
@@ -497,7 +536,11 @@ premature_exit:
complete(&ha->mbx_cmd_comp);
mbx_done:
- if (rval) {
+ if (rval == QLA_ABORTED) {
+ ql_log(ql_log_info, vha, 0xd035,
+ "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
+ mcp->mb[0]);
+ } else if (rval) {
if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
dev_name(&ha->pdev->dev), 0x1020+0x800,
@@ -2177,7 +2220,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mcp->out_mb = MBX_2|MBX_1|MBX_0;
} else if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
- mcp->mb[1] = BIT_6;
+ if (N2N_TOPO(vha->hw))
+ mcp->mb[1] = BIT_4; /* re-init */
+ else
+ mcp->mb[1] = BIT_6; /* LIP */
mcp->mb[2] = 0;
mcp->mb[3] = vha->hw->loop_reset_delay;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3797,30 +3843,68 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Format 1: WWPN %8phC.\n",
vha->port_name);
- /* N2N. direct connect */
- if (IS_QLA27XX(ha) &&
- ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
- /* if our portname is higher then initiate N2N login */
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(rptid_entry->u.f1.port_name)) {
- // ??? qlt_update_host_map(vha, id);
- vha->n2n_id = 0x1;
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Setting n2n_update_needed for id %d\n",
- vha->n2n_id);
+ switch (rptid_entry->u.f1.flags & TOPO_MASK) {
+ case TOPO_N2N:
+ ha->current_topology = ISP_CFG_N;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f1.port_name, 1);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->dm_login_expire = jiffies + 3*HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ set_bit(RELOGIN_NEEDED,
+ &vha->dpc_flags);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
} else {
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Remote login - Waiting for WWPN %8phC.\n",
- rptid_entry->u.f1.port_name);
+ id.b24 = 0;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ vha->d_id.b24 = 0;
+ vha->d_id.b.al_pa = 1;
+ ha->flags.n2n_bigger = 1;
+
+ id.b.al_pa = 2;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: assign local id %x remote id %x\n",
+ vha->d_id.b24, id.b24);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ ha->flags.n2n_bigger = 0;
+ }
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f1.port_name,
+ rptid_entry->u.f1.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
}
- memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
- WWN_SIZE);
+ /* if our portname is higher then initiate N2N login */
+
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
- set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
ha->flags.n2n_ae = 1;
return;
+ break;
+ case TOPO_FL:
+ ha->current_topology = ISP_CFG_FL;
+ break;
+ case TOPO_F:
+ ha->current_topology = ISP_CFG_F;
+ break;
+ default:
+ break;
}
ha->flags.gpsc_supported = 1;
@@ -3909,30 +3993,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name, 1);
if (fcport) {
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->plogi_nack_done_deadline = jiffies + HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- qlt_schedule_sess_for_deletion(fcport);
- break;
- }
- } else {
- id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
- id.b.area = rptid_entry->u.f2.remote_nport_id[1];
- id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
- qla24xx_post_newsess_work(vha, &id,
- rptid_entry->u.f2.port_name,
- rptid_entry->u.f2.node_name,
- NULL,
- FC4_TYPE_UNKNOWN);
}
}
}
@@ -4663,7 +4726,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__);
bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++)
- *bp = cpu_to_be32(*bp);
+ *bp = le32_to_cpu(*bp);
}
return rval;
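The purge_mbox/chip_reset checks added to qla2x00_mailbox_command() above follow a "snapshot and re-check" pattern: record the reset generation on entry and bail out with an aborted status whenever a purge is requested or the generation has moved on. A minimal stand-alone sketch of that pattern is below; the struct, enum and function names are illustrative, not the driver's types.

#include <stdbool.h>
#include <stdint.h>

struct hw_state {
	uint32_t chip_reset;	/* bumped once per ISP reset */
	bool purge_mbox;	/* set while outstanding mailbox cmds are purged */
};

enum mbx_status { MBX_OK, MBX_ABORTED };

static enum mbx_status issue_mbx_cmd(struct hw_state *hw,
				     enum mbx_status (*send_and_wait)(struct hw_state *))
{
	uint32_t snapshot = hw->chip_reset;
	enum mbx_status ret;

	if (hw->purge_mbox)
		return MBX_ABORTED;		/* reset already in progress */

	ret = send_and_wait(hw);		/* load registers, wait for completion */

	if (hw->purge_mbox || hw->chip_reset != snapshot)
		return MBX_ABORTED;		/* a chip reset raced with this command */
	return ret;
}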
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa727d07b702..d620f4bebcd0 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -492,7 +492,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
+ vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
vha->dpc_flags = 0L;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index c5a963c2c86e..20d9dc39f0fb 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -30,6 +30,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return 0;
}
+ if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
+ return 0;
+
if (!(fcport->nvme_prli_service_param &
(NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
@@ -676,15 +679,15 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
}
}
-void qla_nvme_register_hba(struct scsi_qla_host *vha)
+int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret;
+ int ret = EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
- return;
+ return ret;
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
@@ -711,7 +714,9 @@ void qla_nvme_register_hba(struct scsi_qla_host *vha)
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
- return;
+ } else {
+ vha->nvme_local_port->private = vha;
}
- vha->nvme_local_port->private = vha;
+
+ return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 816854ada654..4941d107fb1c 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -142,7 +142,7 @@ struct pt_ls4_rx_unsol {
/*
* Global functions prototype in qla_nvme.c source file.
*/
-void qla_nvme_register_hba(struct scsi_qla_host *);
+int qla_nvme_register_hba(struct scsi_qla_host *);
int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
void qla_nvme_delete(struct scsi_qla_host *);
void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1fbd16c8c9a7..42b8f0d3e580 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2816,6 +2816,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
+ atomic_set(&ha->num_pend_mbx_stage1, 0);
+ atomic_set(&ha->num_pend_mbx_stage2, 0);
+ atomic_set(&ha->num_pend_mbx_stage3, 0);
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3046,7 +3049,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
+ base_vha->mgmt_svr_loop_id =
+ qla2x00_reserve_mgmt_server_loop_id(base_vha);
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3830,14 +3834,6 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
return;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-
- if (fcport->login_retry == 0) {
- fcport->login_retry = vha->hw->login_retry_count;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a3,
- "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
- fcport->port_name, fcport->loop_id, fcport->login_retry);
- }
}
/*
@@ -4785,7 +4781,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
- u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s %d %8phC enter\n",
@@ -4813,10 +4808,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
@@ -4858,9 +4853,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (N2N_TOPO(vha->hw))
- fcport->flags &= ~FCF_FABRIC_DEVICE;
-
fcport->id_changed = 1;
fcport->scan_state = QLA_FCPORT_FOUND;
memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
@@ -4921,12 +4913,22 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (dfcp)
qlt_schedule_sess_for_deletion(tfcp);
- wwn = wwn_to_u64(fcport->node_name);
- if (!wwn)
- qla24xx_async_gnnid(vha, fcport);
- else
- qla24xx_async_gnl(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ if (N2N_TOPO(vha->hw)) {
+ if (vha->flags.nvme_enabled) {
+ fcport->fc4f_nvme = 1;
+ fcport->n2n_flag = 1;
+ }
+ fcport->fw_login_state = 0;
+ /*
+ * wait for link init to complete before sending login
+ */
+ } else {
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
}
}
@@ -5061,6 +5063,10 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_IIDMA:
qla_do_iidma_work(vha, e->u.fcport.fcport);
break;
+ case QLA_EVT_ELS_PLOGI:
+ qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+ e->u.fcport.fcport, false);
+ break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -5090,7 +5096,7 @@ int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
void qla2x00_relogin(struct scsi_qla_host *vha)
{
fc_port_t *fcport;
- int status;
+ int status, relogin_needed = 0;
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -5099,47 +5105,59 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
* to it if we haven't run out of retries.
*/
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry &&
- !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
- if (vha->hw->current_topology != ISP_CFG_NL) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
- "%s %8phC DS %d LS %d\n", __func__,
- fcport->port_name, fcport->disc_state,
- fcport->fw_login_state);
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_RELOGIN;
- ea.fcport = fcport;
- qla2x00_fcport_event_handler(vha, &ea);
- } else if (vha->hw->current_topology == ISP_CFG_NL) {
- fcport->login_retry--;
- status = qla2x00_local_device_login(vha,
- fcport);
- if (status == QLA_SUCCESS) {
- fcport->old_loop_id = fcport->loop_id;
- ql_dbg(ql_dbg_disc, vha, 0x2003,
- "Port login OK: logged in ID 0x%x.\n",
- fcport->loop_id);
- qla2x00_update_fcport(vha, fcport);
- } else if (status == 1) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- /* retry the login again */
- ql_dbg(ql_dbg_disc, vha, 0x2007,
- "Retrying %d login again loop_id 0x%x.\n",
- fcport->login_retry,
- fcport->loop_id);
- } else {
- fcport->login_retry = 0;
- }
+ fcport->login_retry) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_LOGIN_COMPLETE)
+ continue;
+
+ if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
+ fcport->disc_state == DSC_DELETE_PEND) {
+ relogin_needed = 1;
+ } else {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RELOGIN;
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+ } else if (vha->hw->current_topology ==
+ ISP_CFG_NL) {
+ fcport->login_retry--;
+ status =
+ qla2x00_local_device_login(vha,
+ fcport);
+ if (status == QLA_SUCCESS) {
+ fcport->old_loop_id =
+ fcport->loop_id;
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
+ qla2x00_update_fcport
+ (vha, fcport);
+ } else if (status == 1) {
+ set_bit(RELOGIN_NEEDED,
+ &vha->dpc_flags);
+ /* retry the login again */
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry,
+ fcport->loop_id);
+ } else {
+ fcport->login_retry = 0;
+ }
- if (fcport->login_retry == 0 &&
- status != QLA_SUCCESS)
- qla2x00_clear_loop_id(fcport);
+ if (fcport->login_retry == 0 &&
+ status != QLA_SUCCESS)
+ qla2x00_clear_loop_id(fcport);
+ }
}
}
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+ if (relogin_needed)
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
ql_dbg(ql_dbg_disc, vha, 0x400e,
"Relogin end.\n");
}
@@ -6179,6 +6197,11 @@ intr_on_check:
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
+ if (test_and_clear_bit(N2N_LINK_RESET,
+ &base_vha->dpc_flags)) {
+ qla2x00_lip_reset(base_vha);
+ }
+
ha->dpc_active = 0;
end_loop:
set_current_state(TASK_INTERRUPTIBLE);
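The reworked qla2x00_relogin() loop above only notes ports that are busy (async op or deletion pending) and raises a single RELOGIN_NEEDED-style flag at the end so the DPC thread revisits them on the next pass. A minimal stand-alone sketch of that accumulation pattern follows; the struct and function names are illustrative only.

#include <stdbool.h>
#include <stddef.h>

struct port {
	bool online;
	bool busy;		/* async op or deletion pending */
	int login_retry;
};

static bool relogin_pass(struct port *ports, size_t n,
			 void (*try_login)(struct port *))
{
	bool relogin_needed = false;
	size_t i;

	for (i = 0; i < n; i++) {
		struct port *p = &ports[i];

		if (p->online || !p->login_retry)
			continue;
		if (p->busy)
			relogin_needed = true;	/* revisit on the next DPC pass */
		else
			try_login(p);
	}
	return relogin_needed;		/* caller sets the dpc flag once */
}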
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1027b0cb7fa3..8c811b251d42 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -805,6 +805,10 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
list_for_each_entry(pla, &vha->plogi_ack_list, list) {
if (pla->id.b24 == id->b24) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
+ "%s %d %8phC Term INOT due to new INOT",
+ __func__, __LINE__,
+ pla->iocb.u.isp24.port_name);
qlt_send_term_imm_notif(vha, &pla->iocb, 1);
memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
return pla;
@@ -982,8 +986,9 @@ void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ if (!own)
+ qlt_send_first_logo(vha, &logo);
sess->send_els_logo = 0;
- qlt_send_first_logo(vha, &logo);
}
if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
@@ -1053,7 +1058,6 @@ void qlt_free_session_done(struct work_struct *work)
sess->disc_state = DSC_DELETED;
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
sess->deleted = QLA_SESS_DELETED;
- sess->login_retry = vha->hw->login_retry_count;
if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1073,6 +1077,7 @@ void qlt_free_session_done(struct work_struct *work)
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
+ own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
if (con) {
iocb = &con->iocb;
@@ -1156,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- qla2x00_mark_device_lost(vha, sess, 1, 1);
+ qla2x00_mark_device_lost(vha, sess, 0, 0);
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
sess->disc_state = DSC_DELETE_PEND;
@@ -3782,7 +3787,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
return;
}
cmd->jiffies_at_free = get_jiffies_64();
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -4145,7 +4150,7 @@ out_term:
qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4276,9 +4281,9 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
{
struct se_session *se_sess = sess->se_sess;
struct qla_tgt_cmd *cmd;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return NULL;
@@ -4291,6 +4296,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
@@ -4714,6 +4720,10 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s %d %8phC Term INOT due to mem alloc fail",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
qlt_send_term_imm_notif(vha, iocb, 1);
goto out;
}
@@ -5293,7 +5303,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct fc_port *sess;
struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
- int tag;
+ int tag, cpu;
unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
@@ -5325,7 +5335,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
se_sess = sess->se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return;
@@ -5356,6 +5366,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
+ cmd->se_cmd.map_cpu = cpu;
if (qfull) {
cmd->q_full = 1;
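
A minimal sketch of the allocate/free pairing that the qla2xxx hunks above converge on after the switch from percpu_ida to the target core's sbitmap-backed session tag pool. This is illustrative only and not part of the patch: the example_* helpers are hypothetical, locking and error handling are trimmed, and the session is assumed to have been set up with a per-command area via target_setup_session().

static struct qla_tgt_cmd *example_get_cmd(struct se_session *se_sess)
{
	struct qla_tgt_cmd *cmd;
	unsigned int cpu;
	int tag;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;
	cmd->se_cmd.map_cpu = cpu;	/* target_free_tag() needs both fields */
	return cmd;
}

static void example_put_cmd(struct se_session *se_sess, struct qla_tgt_cmd *cmd)
{
	/* clears the tag bit on the CPU recorded at allocation time */
	target_free_tag(se_sess, &cmd->se_cmd);
}
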
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 731ca0d8520a..0ccd06f11f12 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -571,6 +571,15 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
}
break;
+ case T268_BUF_TYPE_REQ_MIRROR:
+ case T268_BUF_TYPE_RSP_MIRROR:
+ /*
+ * Mirror pointers are not implemented in the
+ * driver; shadow pointers are used by the
+ * driver instead. Skip these entries.
+ */
+ qla27xx_skip_entry(ent, buf);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
@@ -1028,8 +1037,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd300,
"Firmware has been previously dumped (%p),"
" -- ignoring request\n", vha->hw->fw_dump);
- else
+ else {
+ QLA_FW_STOPPED(vha->hw);
qla27xx_execute_fwdt_template(vha);
+ }
#ifndef __CHECKER__
if (!hardware_locked)
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 1ad7582220c3..3850b28518e5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.07-k"
+#define QLA2XXX_VERSION "10.00.00.08-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7732e9336d43..e03d12a5f986 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1049,10 +1049,8 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
NULL,
};
-static struct se_portal_group *tcm_qla2xxx_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
@@ -1171,10 +1169,8 @@ static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
NULL,
};
-static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
@@ -1465,8 +1461,7 @@ static void tcm_qla2xxx_free_session(struct fc_port *sess)
}
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(se_sess);
}
static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
@@ -1543,7 +1538,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
* Locate our struct se_node_acl either from an explict NodeACL created
* via ConfigFS, or via running in TPG demo mode.
*/
- se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+ se_sess = target_setup_session(&tpg->se_tpg, num_tags,
sizeof(struct qla_tgt_cmd),
TARGET_PROT_ALL, port_name,
qlat_sess, tcm_qla2xxx_session_cb);
@@ -1624,9 +1619,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
sess->conf_compl_supported = conf_compl_supported;
- /* Reset logout parameters to default */
- sess->logout_on_delete = 1;
- sess->keep_nport_handle = 0;
}
/*
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 8578e566ab41..9d09228eee28 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -959,7 +959,7 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
/* Temporary workaround until bug is found and fixed (one bug has been found
already, but fixing it makes things even worse) -jj */
int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
- host->can_queue = atomic_read(&host->host_busy) + num_free;
+ host->can_queue = scsi_host_busy(host) + num_free;
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4c60c260c5da..fc1356d101b0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -162,12 +162,12 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
(level > 1)) {
scsi_print_result(cmd, "Done", disposition);
scsi_print_command(cmd);
- if (status_byte(cmd->result) & CHECK_CONDITION)
+ if (status_byte(cmd->result) == CHECK_CONDITION)
scsi_print_sense(cmd);
if (level > 3)
scmd_printk(KERN_INFO, cmd,
"scsi host busy %d failed %d\n",
- atomic_read(&cmd->device->host->host_busy),
+ scsi_host_busy(cmd->device->host),
cmd->device->host->host_failed);
}
}
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index 6dcc4c685d1d..4fd75a3aff66 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -43,7 +43,4 @@ struct scsi_device;
struct scsi_target;
struct scatterlist;
-/* obsolete typedef junk. */
-#include "scsi_typedefs.h"
-
#endif /* _SCSI_H */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 364e71861bfd..60bcc6df97a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -164,29 +164,29 @@ static const char *sdebug_version_date = "20180128";
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
+#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
SDEBUG_OPT_SHORT_TRANSFER | \
- SDEBUG_OPT_HOST_BUSY)
+ SDEBUG_OPT_HOST_BUSY | \
+ SDEBUG_OPT_CMD_ABORT)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_RECOVERED_ERR is set.
* - a TRANSPORT_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_TRANSPORT_ERR is set.
+ * - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
+ * CMD_ABORT
*
- * When "every_nth" < 0 then after "- every_nth" commands:
- * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
- * - a RECOVERED_ERROR is simulated on successful read and write
- * commands if SDEBUG_OPT_RECOVERED_ERR is set.
- * - a TRANSPORT_ERROR is simulated on successful read and write
- * commands if _DEBUG_OPT_TRANSPORT_ERR is set.
- * This will continue on every subsequent command until some other action
- * occurs (e.g. the user * writing a new value (other than -1 or 1) to
- * every_nth via sysfs).
+ * When "every_nth" < 0 then after "- every_nth" commands the selected
+ * error will be injected. The error will be injected on every subsequent
+ * command until some other action occurs; for example, the user writing
+ * a new value (other than -1 or 1) to every_nth:
+ * echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
*/
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
@@ -281,6 +281,7 @@ struct sdebug_defer {
int issuing_cpu;
bool init_hrt;
bool init_wq;
+ bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -296,6 +297,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dix:1;
unsigned int inj_short:1;
unsigned int inj_host_busy:1;
+ unsigned int inj_cmd_abort:1;
};
struct sdebug_queue {
@@ -3792,6 +3794,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
+ bool aborted = sd_dp->aborted;
int qc_idx;
int retiring = 0;
unsigned long iflags;
@@ -3801,6 +3804,8 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct sdebug_dev_info *devip;
sd_dp->defer_t = SDEB_DEFER_NONE;
+ if (unlikely(aborted))
+ sd_dp->aborted = false;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3852,6 +3857,11 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
atomic_set(&retired_max_queue, k + 1);
}
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (unlikely(aborted)) {
+ if (sdebug_verbose)
+ pr_info("bypassing scsi_done() due to aborted cmd\n");
+ return;
+ }
scp->scsi_done(scp); /* callback to mid level */
}
@@ -4312,7 +4322,8 @@ static void setup_inject(struct sdebug_queue *sqp,
if (sdebug_every_nth > 0)
sqcp->inj_recovered = sqcp->inj_transport
= sqcp->inj_dif
- = sqcp->inj_dix = sqcp->inj_short = 0;
+ = sqcp->inj_dix = sqcp->inj_short
+ = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
return;
}
sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
@@ -4321,6 +4332,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
+ sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4458,7 +4470,14 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
sd_dp->defer_t = SDEB_DEFER_WQ;
+ if (unlikely(sqcp->inj_cmd_abort))
+ sd_dp->aborted = true;
schedule_work(&sd_dp->ew.work);
+ if (unlikely(sqcp->inj_cmd_abort)) {
+ sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
+ cmnd->request->tag);
+ blk_abort_request(cmnd->request);
+ }
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
(scsi_result == device_qfull_result)))
@@ -4844,12 +4863,11 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
(unsigned long)sdebug_dev_size_mb *
1048576;
- fake_storep = vmalloc(sz);
+ fake_storep = vzalloc(sz);
if (NULL == fake_storep) {
pr_err("out of memory, 9\n");
return -ENOMEM;
}
- memset(fake_storep, 0, sz);
}
sdebug_fake_rw = n;
}
@@ -5391,13 +5409,12 @@ static int __init scsi_debug_init(void)
}
if (sdebug_fake_rw == 0) {
- fake_storep = vmalloc(sz);
+ fake_storep = vzalloc(sz);
if (NULL == fake_storep) {
pr_err("out of memory, 1\n");
ret = -ENOMEM;
goto free_q_arr;
}
- memset(fake_storep, 0, sz);
if (sdebug_num_parts > 0)
sdebug_build_parts(fake_storep, sz);
}
@@ -5790,11 +5807,13 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
fini:
if (F_DELAY_OVERR & flags)
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
- else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
+ else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
+ sdebug_ndelay > 10000)) {
/*
- * If any delay is active, for F_SSU_DELAY want at least 1
- * second and if sdebug_jdelay>0 want a long delay of that
- * many seconds; for F_SYNC_DELAY want 1/20 of that.
+ * Skip long delays if ndelay <= 10 microseconds. Otherwise
+ * for Start Stop Unit (SSU) want at least 1 second delay and
+ * if sdebug_jdelay>1 want a long delay of that many seconds.
+ * For Synchronize Cache want 1/20 of SSU's delay.
*/
int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2715cdaa669c..b7a8fdfeb2f4 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -66,7 +66,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
{
lockdep_assert_held(shost->host_lock);
- if (atomic_read(&shost->host_busy) == shost->host_failed) {
+ if (scsi_host_busy(shost) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -2169,7 +2169,7 @@ int scsi_error_handler(void *data)
break;
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
- shost->host_failed != atomic_read(&shost->host_busy)) {
+ shost->host_failed != scsi_host_busy(shost)) {
SCSI_LOG_ERROR_RECOVERY(1,
shost_printk(KERN_INFO, shost,
"scsi_eh_%d: sleeping\n",
@@ -2184,7 +2184,7 @@ int scsi_error_handler(void *data)
"scsi_eh_%d: waking up %d/%d/%d\n",
shost->host_no, shost->host_eh_scheduled,
shost->host_failed,
- atomic_read(&shost->host_busy)));
+ scsi_host_busy(shost)));
/*
* We have a host that is failing for some reason. Figure out
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 0a875491f5a7..cc30fccc1a2e 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -100,8 +100,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
- if ((driver_byte(result) & DRIVER_SENSE) &&
- (scsi_sense_valid(&sshdr))) {
+ if (driver_byte(result) == DRIVER_SENSE &&
+ scsi_sense_valid(&sshdr)) {
switch (sshdr.sense_key) {
case ILLEGAL_REQUEST:
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9cb9a166fa0c..0adfb3bce0fd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -345,7 +345,8 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
unsigned long flags;
rcu_read_lock();
- atomic_dec(&shost->host_busy);
+ if (!shost->use_blk_mq)
+ atomic_dec(&shost->host_busy);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -371,7 +372,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
static void scsi_kick_queue(struct request_queue *q)
{
if (q->mq_ops)
- blk_mq_start_hw_queues(q);
+ blk_mq_run_hw_queues(q, false);
else
blk_run_queue(q);
}
@@ -444,7 +445,12 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if (shost->can_queue > 0 &&
+ /*
+ * blk-mq can handle host queue busy efficiently via host-wide driver
+ * tag allocation
+ */
+
+ if (!shost->use_blk_mq && shost->can_queue > 0 &&
atomic_read(&shost->host_busy) >= shost->can_queue)
return true;
if (atomic_read(&shost->host_blocked) > 0)
@@ -662,6 +668,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
cmd->request->next_rq->special = NULL;
}
+/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes, unsigned int bidi_bytes)
{
@@ -760,161 +767,39 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
}
}
-/*
- * Function: scsi_io_completion()
- *
- * Purpose: Completion processing for block device I/O requests.
- *
- * Arguments: cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: We will finish off the specified number of sectors. If we
- * are done, the command block will be released and the queue
- * function will be goosed. If we are not done then we have to
- * figure out what to do next:
- *
- * a) We can call scsi_requeue_command(). The request
- * will be unprepared and put back on the queue. Then
- * a new command will be created for it. This should
- * be used if we made forward progress, or if we want
- * to switch from READ(10) to READ(6) for example.
- *
- * b) We can call __scsi_queue_insert(). The request will
- * be put back on the queue and retried using the same
- * command as before, possibly after a delay.
- *
- * c) We can call scsi_end_request() with -EIO to fail
- * the remainder of the request.
- */
-void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+/* Helper for scsi_io_completion() when "reprep" action required. */
+static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
+ struct request_queue *q)
+{
+ /* A new command will be prepared and issued. */
+ if (q->mq_ops) {
+ scsi_mq_requeue_cmd(cmd);
+ } else {
+ /* Unprep request and put it back at head of the queue. */
+ scsi_release_buffers(cmd);
+ scsi_requeue_command(q, cmd);
+ }
+}
+
+/* Helper for scsi_io_completion() when special action required. */
+static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
- blk_status_t error = BLK_STS_OK;
- struct scsi_sense_hdr sshdr;
- bool sense_valid = false;
- int sense_deferred = 0, level = 0;
+ int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
+ struct scsi_sense_hdr sshdr;
+ bool sense_valid;
+ bool sense_current = true; /* false implies "deferred sense" */
+ blk_status_t blk_stat;
- if (result) {
- sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
- if (sense_valid)
- sense_deferred = scsi_sense_is_deferred(&sshdr);
- }
-
- if (blk_rq_is_passthrough(req)) {
- if (result) {
- if (sense_valid) {
- /*
- * SG_IO wants current and deferred errors
- */
- scsi_req(req)->sense_len =
- min(8 + cmd->sense_buffer[7],
- SCSI_SENSE_BUFFERSIZE);
- }
- if (!sense_deferred)
- error = scsi_result_to_blk_status(cmd, result);
- }
- /*
- * scsi_result_to_blk_status may have reset the host_byte
- */
- scsi_req(req)->result = cmd->result;
- scsi_req(req)->resid_len = scsi_get_resid(cmd);
-
- if (scsi_bidi_cmnd(cmd)) {
- /*
- * Bidi commands Must be complete as a whole,
- * both sides at once.
- */
- scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
- if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
- blk_rq_bytes(req->next_rq)))
- BUG();
- return;
- }
- } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
- /*
- * Flush commands do not transfers any data, and thus cannot use
- * good_bytes != blk_rq_bytes(req) as the signal for an error.
- * This sets the error explicitly for the problem case.
- */
- error = scsi_result_to_blk_status(cmd, result);
- }
-
- /* no bidi support for !blk_rq_is_passthrough yet */
- BUG_ON(blk_bidi_rq(req));
-
- /*
- * Next deal with any sectors which we were able to correctly
- * handle.
- */
- SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
- "%u sectors total, %d bytes done.\n",
- blk_rq_sectors(req), good_bytes));
-
- /*
- * Recovered errors need reporting, but they're always treated as
- * success, so fiddle the result code here. For passthrough requests
- * we already took a copy of the original into sreq->result which
- * is what gets returned to the user
- */
- if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
- /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
- * print since caller wants ATA registers. Only occurs on
- * SCSI ATA PASS_THROUGH commands when CK_COND=1
- */
- if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
- ;
- else if (!(req->rq_flags & RQF_QUIET))
- scsi_print_sense(cmd);
- result = 0;
- /* for passthrough error may be set */
- error = BLK_STS_OK;
- }
- /*
- * Another corner case: the SCSI status byte is non-zero but 'good'.
- * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
- * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
- * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
- * intermediate statuses (both obsolete in SAM-4) as good.
- */
- if (status_byte(result) && scsi_status_is_good(result)) {
- result = 0;
- error = BLK_STS_OK;
- }
-
- /*
- * special case: failed zero length commands always need to
- * drop down into the retry code. Otherwise, if we finished
- * all bytes in the request we are done now.
- */
- if (!(blk_rq_bytes(req) == 0 && error) &&
- !scsi_end_request(req, error, good_bytes, 0))
- return;
-
- /*
- * Kill remainder if no retrys.
- */
- if (error && scsi_noretry_cmd(cmd)) {
- if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
- BUG();
- return;
- }
-
- /*
- * If there had been no error, but we have leftover bytes in the
- * requeues just queue the command up again.
- */
- if (result == 0)
- goto requeue;
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_current = !scsi_sense_is_deferred(&sshdr);
- error = scsi_result_to_blk_status(cmd, result);
+ blk_stat = scsi_result_to_blk_status(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -922,7 +807,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* happens.
*/
action = ACTION_RETRY;
- } else if (sense_valid && !sense_deferred) {
+ } else if (sense_valid && sense_current) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
if (cmd->device->removable) {
@@ -958,18 +843,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_REPREP;
} else if (sshdr.asc == 0x10) /* DIX */ {
action = ACTION_FAIL;
- error = BLK_STS_PROTECTION;
+ blk_stat = BLK_STS_PROTECTION;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
action = ACTION_FAIL;
- error = BLK_STS_TARGET;
+ blk_stat = BLK_STS_TARGET;
} else
action = ACTION_FAIL;
break;
case ABORTED_COMMAND:
action = ACTION_FAIL;
if (sshdr.asc == 0x10) /* DIF */
- error = BLK_STS_PROTECTION;
+ blk_stat = BLK_STS_PROTECTION;
break;
case NOT_READY:
/* If the device is in the process of becoming
@@ -1022,8 +907,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
DEFAULT_RATELIMIT_BURST);
if (unlikely(scsi_logging_level))
- level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
- SCSI_LOG_MLCOMPLETE_BITS);
+ level =
+ SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
+ SCSI_LOG_MLCOMPLETE_BITS);
/*
* if logging is enabled the failure will be printed
@@ -1031,25 +917,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (!level && __ratelimit(&_rs)) {
scsi_print_result(cmd, NULL, FAILED);
- if (driver_byte(result) & DRIVER_SENSE)
+ if (driver_byte(result) == DRIVER_SENSE)
scsi_print_sense(cmd);
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
+ if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0))
return;
/*FALLTHRU*/
case ACTION_REPREP:
- requeue:
- /* Unprep the request and put it back at the head of the queue.
- * A new command will be prepared and issued.
- */
- if (q->mq_ops) {
- scsi_mq_requeue_cmd(cmd);
- } else {
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
- }
+ scsi_io_completion_reprep(cmd, q);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -1062,6 +939,185 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
}
+/*
+ * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
+ * new result that may suppress further error checking. Also modifies
+ * *blk_statp in some cases.
+ */
+static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
+ blk_status_t *blk_statp)
+{
+ bool sense_valid;
+ bool sense_current = true; /* false implies "deferred sense" */
+ struct request *req = cmd->request;
+ struct scsi_sense_hdr sshdr;
+
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_current = !scsi_sense_is_deferred(&sshdr);
+
+ if (blk_rq_is_passthrough(req)) {
+ if (sense_valid) {
+ /*
+ * SG_IO wants current and deferred errors
+ */
+ scsi_req(req)->sense_len =
+ min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
+ }
+ if (sense_current)
+ *blk_statp = scsi_result_to_blk_status(cmd, result);
+ } else if (blk_rq_bytes(req) == 0 && sense_current) {
+ /*
+ * Flush commands do not transfer any data, and thus cannot use
+ * good_bytes != blk_rq_bytes(req) as the signal for an error.
+ * This sets *blk_statp explicitly for the problem case.
+ */
+ *blk_statp = scsi_result_to_blk_status(cmd, result);
+ }
+ /*
+ * Recovered errors need reporting, but they're always treated as
+ * success, so fiddle the result code here. For passthrough requests
+ * we already took a copy of the original into sreq->result which
+ * is what gets returned to the user
+ */
+ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+ bool do_print = true;
+ /*
+ * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
+ * skip print since caller wants ATA registers. Only occurs
+ * on SCSI ATA PASS_THROUGH commands when CK_COND=1
+ */
+ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+ do_print = false;
+ else if (req->rq_flags & RQF_QUIET)
+ do_print = false;
+ if (do_print)
+ scsi_print_sense(cmd);
+ result = 0;
+ /* for passthrough, *blk_statp may be set */
+ *blk_statp = BLK_STS_OK;
+ }
+ /*
+ * Another corner case: the SCSI status byte is non-zero but 'good'.
+ * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
+ * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
+ * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
+ * intermediate statuses (both obsolete in SAM-4) as good.
+ */
+ if (status_byte(result) && scsi_status_is_good(result)) {
+ result = 0;
+ *blk_statp = BLK_STS_OK;
+ }
+ return result;
+}
+
+/*
+ * Function: scsi_io_completion()
+ *
+ * Purpose: Completion processing for block device I/O requests.
+ *
+ * Arguments: cmd - command that is finished.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: We will finish off the specified number of sectors. If we
+ * are done, the command block will be released and the queue
+ * function will be goosed. If we are not done then we have to
+ * figure out what to do next:
+ *
+ * a) We can call scsi_requeue_command(). The request
+ * will be unprepared and put back on the queue. Then
+ * a new command will be created for it. This should
+ * be used if we made forward progress, or if we want
+ * to switch from READ(10) to READ(6) for example.
+ *
+ * b) We can call __scsi_queue_insert(). The request will
+ * be put back on the queue and retried using the same
+ * command as before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with blk_stat other than
+ * BLK_STS_OK, to fail the remainder of the request.
+ */
+void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+{
+ int result = cmd->result;
+ struct request_queue *q = cmd->device->request_queue;
+ struct request *req = cmd->request;
+ blk_status_t blk_stat = BLK_STS_OK;
+
+ if (unlikely(result)) /* a nz result may or may not be an error */
+ result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
+
+ if (unlikely(blk_rq_is_passthrough(req))) {
+ /*
+ * scsi_result_to_blk_status may have reset the host_byte
+ */
+ scsi_req(req)->result = cmd->result;
+ scsi_req(req)->resid_len = scsi_get_resid(cmd);
+
+ if (unlikely(scsi_bidi_cmnd(cmd))) {
+ /*
+ * Bidi commands must be completed as a whole,
+ * both sides at once.
+ */
+ scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
+ if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ WARN_ONCE(true,
+ "Bidi command with remaining bytes");
+ return;
+ }
+ }
+
+ /* no bidi support yet, other than in pass-through */
+ if (unlikely(blk_bidi_rq(req))) {
+ WARN_ONCE(true, "Only support bidi command in passthrough");
+ scmd_printk(KERN_ERR, cmd, "Killing bidi command\n");
+ if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ WARN_ONCE(true, "Bidi command with remaining bytes");
+ return;
+ }
+
+ /*
+ * Next deal with any sectors which we were able to correctly
+ * handle.
+ */
+ SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
+ "%u sectors total, %d bytes done.\n",
+ blk_rq_sectors(req), good_bytes));
+
+ /*
+ * Failed, zero-length commands always need to drop down to the
+ * retry code. The fast path should return in this block.
+ */
+ if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
+ if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0)))
+ return; /* no bytes remaining */
+ }
+
+ /* Kill remainder if no retries. */
+ if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
+ if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0))
+ WARN_ONCE(true,
+ "Bytes remaining after failed, no-retry command");
+ return;
+ }
+
+ /*
+ * If there had been no error, but we have leftover bytes in the
+ * request, just requeue the command.
+ */
+ if (likely(result == 0))
+ scsi_io_completion_reprep(cmd, q);
+ else
+ scsi_io_completion_action(cmd, result);
+}
+
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
int count;
@@ -1550,7 +1606,10 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
if (scsi_host_in_recovery(shost))
return 0;
- busy = atomic_inc_return(&shost->host_busy) - 1;
+ if (!shost->use_blk_mq)
+ busy = atomic_inc_return(&shost->host_busy) - 1;
+ else
+ busy = 0;
if (atomic_read(&shost->host_blocked) > 0) {
if (busy)
goto starved;
@@ -1566,7 +1625,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (shost->can_queue > 0 && busy >= shost->can_queue)
+ if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1652,7 +1711,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
* with the locks as normal issue path does.
*/
atomic_inc(&sdev->device_busy);
- atomic_inc(&shost->host_busy);
+
+ if (!shost->use_blk_mq)
+ atomic_inc(&shost->host_busy);
if (starget->can_queue > 0)
atomic_inc(&starget->target_busy);
@@ -2555,7 +2616,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
* ILLEGAL REQUEST if the code page isn't supported */
if (use_10_for_ms && !scsi_status_is_good(result) &&
- (driver_byte(result) & DRIVER_SENSE)) {
+ driver_byte(result) == DRIVER_SENSE) {
if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
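
Several files in this series (scsi_lib.c above, along with scsi_ioctl.c, scsi_scan.c, sd.c and the SPI transport) replace bitwise tests such as "driver_byte(result) & DRIVER_SENSE" with an equality test, because the driver byte carries discrete values rather than independent flag bits. A minimal sketch of the resulting check, assuming *sshdr was filled in by a call such as scsi_execute_req(); the helper name is hypothetical:

static bool example_result_has_sense(int result,
				     const struct scsi_sense_hdr *sshdr)
{
	/* DRIVER_SENSE is a value, not a flag bit, hence the == test */
	return driver_byte(result) == DRIVER_SENSE && scsi_sense_valid(sshdr);
}
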
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0880d975eed3..78ca63dfba4a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -614,7 +614,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* INQUIRY should not yield UNIT_ATTENTION
* but many buggy devices do so anyway.
*/
- if ((driver_byte(result) & DRIVER_SENSE) &&
+ if (driver_byte(result) == DRIVER_SENSE &&
scsi_sense_valid(&sshdr)) {
if ((sshdr.sense_key == UNIT_ATTENTION) &&
((sshdr.asc == 0x28) ||
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7943b762c12d..3aee9464a7bf 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -382,7 +382,7 @@ static ssize_t
show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
- return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
+ return snprintf(buf, 20, "%d\n", scsi_host_busy(shost));
}
static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
@@ -722,8 +722,24 @@ static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- if (device_remove_file_self(dev, attr))
- scsi_remove_device(to_scsi_device(dev));
+ struct kernfs_node *kn;
+
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ WARN_ON_ONCE(!kn);
+ /*
+ * Concurrent writes into the "delete" sysfs attribute may trigger
+ * concurrent calls to device_remove_file() and scsi_remove_device().
+ * device_remove_file() handles concurrent removal calls by
+ * serializing these and by ignoring the second and later removal
+ * attempts. Concurrent calls of scsi_remove_device() are
+ * serialized. The second and later calls of scsi_remove_device() are
+ * ignored because the first call of that function changes the device
+ * state into SDEV_DEL.
+ */
+ device_remove_file(dev, attr);
+ scsi_remove_device(to_scsi_device(dev));
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
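
The sdev_store_delete() rework above replaces device_remove_file_self() with an explicit break of sysfs active protection, so that a store() callback can delete its own device without deadlocking on the attribute file it was invoked from. A minimal sketch of that shape, where example_destroy_device() is a hypothetical stand-in for scsi_remove_device():

static void example_destroy_device(struct device *dev)
{
	/* hypothetical teardown; scsi_sysfs.c calls scsi_remove_device() */
}

static ssize_t example_delete_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct kernfs_node *kn;

	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	device_remove_file(dev, attr);	/* serializes concurrent writers */
	example_destroy_device(dev);
	if (kn)
		sysfs_unbreak_active_protection(kn);
	return count;
}
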
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 13948102ca29..381668fa135d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -567,7 +567,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
- event->seconds = get_seconds();
+ event->seconds = ktime_get_real_seconds();
event->vendor_id = 0;
event->host_no = shost->host_no;
event->event_datalen = sizeof(u32); /* bytes */
@@ -635,7 +635,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
- event->seconds = get_seconds();
+ event->seconds = ktime_get_real_seconds();
event->vendor_id = vendor_id;
event->host_no = shost->host_no;
event->event_datalen = data_len; /* bytes */
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 2ca150b16764..40b85b752b79 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -136,7 +136,7 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
0, NULL);
- if (!(driver_byte(result) & DRIVER_SENSE) ||
+ if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
}
diff --git a/drivers/scsi/scsi_typedefs.h b/drivers/scsi/scsi_typedefs.h
deleted file mode 100644
index 2ed4c5cb7088..000000000000
--- a/drivers/scsi/scsi_typedefs.h
+++ /dev/null
@@ -1,2 +0,0 @@
-
-typedef struct scsi_cmnd Scsi_Cmnd;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bbebdc3769b0..a58cee7a85f2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1635,7 +1635,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
- if (driver_byte(res) & DRIVER_SENSE)
+ if (driver_byte(res) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, sshdr);
/* we need to evaluate the error return */
@@ -1737,8 +1737,8 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
&sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
- if ((driver_byte(result) & DRIVER_SENSE) &&
- (scsi_sense_valid(&sshdr))) {
+ if (driver_byte(result) == DRIVER_SENSE &&
+ scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
scsi_print_sense_hdr(sdev, NULL, &sshdr);
}
@@ -2028,7 +2028,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
} else {
sdkp->device->no_write_same = 1;
sd_config_write_same(sdkp);
- req->__data_len = blk_rq_bytes(req);
req->rq_flags |= RQF_QUIET;
}
break;
@@ -2097,10 +2096,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
retries++;
} while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
- ((driver_byte(the_result) & DRIVER_SENSE) &&
+ ((driver_byte(the_result) == DRIVER_SENSE) &&
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
- if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
+ if (driver_byte(the_result) != DRIVER_SENSE) {
/* no sense, TUR either succeeded or failed
* with a status error */
if(!spintime && !scsi_status_is_good(the_result)) {
@@ -2226,7 +2225,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr *sshdr, int sense_valid,
int the_result)
{
- if (driver_byte(the_result) & DRIVER_SENSE)
+ if (driver_byte(the_result) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, sshdr);
else
sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
@@ -3492,7 +3491,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
- if (driver_byte(res) & DRIVER_SENSE)
+ if (driver_byte(res) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 2bf3bf73886e..412c1787dcd9 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -148,12 +148,6 @@ int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = sdkp->device->sector_size;
cmd->allowed = 0;
- /*
- * Report may return less bytes than requested. Make sure
- * to report completion on the entire initial request.
- */
- rq->__data_len = nr_bytes;
-
return BLKPREP_OK;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 139e13c73b41..8a254bb46a9b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1875,7 +1875,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
- gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
@@ -1905,9 +1905,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- gfp_mask |= __GFP_ZERO;
-
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
@@ -1918,7 +1915,7 @@ retry:
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
- schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
+ schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index dc3a0542a2e8..e97bf2670315 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -483,6 +483,8 @@ struct pqi_raid_error_info {
#define CISS_CMD_STATUS_TMF 0xd
#define CISS_CMD_STATUS_AIO_DISABLED 0xe
+#define PQI_CMD_STATUS_ABORTED CISS_CMD_STATUS_ABORTED
+
#define PQI_NUM_EVENT_QUEUE_ELEMENTS 32
#define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response)
@@ -581,8 +583,8 @@ struct pqi_admin_queues_aligned {
struct pqi_admin_queues {
void *iq_element_array;
void *oq_element_array;
- volatile pqi_index_t *iq_ci;
- volatile pqi_index_t *oq_pi;
+ pqi_index_t *iq_ci;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_element_array_bus_addr;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t iq_ci_bus_addr;
@@ -606,8 +608,8 @@ struct pqi_queue_group {
dma_addr_t oq_element_array_bus_addr;
__le32 __iomem *iq_pi[2];
pqi_index_t iq_pi_copy[2];
- volatile pqi_index_t *iq_ci[2];
- volatile pqi_index_t *oq_pi;
+ pqi_index_t __iomem *iq_ci[2];
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_ci_bus_addr[2];
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
@@ -620,7 +622,7 @@ struct pqi_event_queue {
u16 oq_id;
u16 int_msg_num;
void *oq_element_array;
- volatile pqi_index_t *oq_pi;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b78d20b74ed8..2112ea6723c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.1.4-115"
+#define DRIVER_VERSION "1.1.4-130"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 4
-#define DRIVER_REVISION 115
+#define DRIVER_REVISION 130
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -1197,20 +1197,30 @@ no_buffer:
device->volume_offline = volume_offline;
}
+#define PQI_INQUIRY_PAGE0_RETRIES 3
+
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
+ unsigned int retries;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Send an inquiry to the device to see what it is. */
- rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
- if (rc)
- goto out;
+ for (retries = 0;;) {
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
+ buffer, 64);
+ if (rc == 0)
+ break;
+ if (pqi_is_logical_device(device) ||
+ rc != PQI_CMD_STATUS_ABORTED ||
+ ++retries > PQI_INQUIRY_PAGE0_RETRIES)
+ goto out;
+ }
scsi_sanitize_inquiry_string(&buffer[8], 8);
scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -2693,7 +2703,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
oq_ci = queue_group->oq_ci_copy;
while (1) {
- oq_pi = *queue_group->oq_pi;
+ oq_pi = readl(queue_group->oq_pi);
if (oq_pi == oq_ci)
break;
@@ -2784,7 +2794,7 @@ static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
iq_pi = queue_group->iq_pi_copy[RAID_PATH];
- iq_ci = *queue_group->iq_ci[RAID_PATH];
+ iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
if (pqi_num_elements_free(iq_pi, iq_ci,
ctrl_info->num_elements_per_iq))
@@ -2943,7 +2953,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
oq_ci = event_queue->oq_ci_copy;
while (1) {
- oq_pi = *event_queue->oq_pi;
+ oq_pi = readl(event_queue->oq_pi);
if (oq_pi == oq_ci)
break;
@@ -3167,7 +3177,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
size_t element_array_length_per_iq;
size_t element_array_length_per_oq;
void *element_array;
- void *next_queue_index;
+ void __iomem *next_queue_index;
void *aligned_pointer;
unsigned int num_inbound_queues;
unsigned int num_outbound_queues;
@@ -3263,7 +3273,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
PQI_EVENT_OQ_ELEMENT_LENGTH;
- next_queue_index = PTR_ALIGN(element_array,
+ next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
@@ -3271,21 +3281,24 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
queue_group->iq_ci[RAID_PATH] = next_queue_index;
queue_group->iq_ci_bus_addr[RAID_PATH] =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
queue_group->iq_ci[AIO_PATH] = next_queue_index;
queue_group->iq_ci_bus_addr[AIO_PATH] =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
queue_group->oq_pi = next_queue_index;
queue_group->oq_pi_bus_addr =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
@@ -3294,7 +3307,8 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->event_queue.oq_pi = next_queue_index;
ctrl_info->event_queue.oq_pi_bus_addr =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
return 0;
}
@@ -3368,7 +3382,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
admin_queues->oq_element_array =
&admin_queues_aligned->oq_element_array;
admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
- admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
+ admin_queues->oq_pi =
+ (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
admin_queues->iq_element_array_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
@@ -3384,8 +3399,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->admin_queue_memory_base);
admin_queues->oq_pi_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
- ((void *)admin_queues->oq_pi -
- ctrl_info->admin_queue_memory_base);
+ ((void __iomem *)admin_queues->oq_pi -
+ (void __iomem *)ctrl_info->admin_queue_memory_base);
return 0;
}
@@ -3486,7 +3501,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
- oq_pi = *admin_queues->oq_pi;
+ oq_pi = readl(admin_queues->oq_pi);
if (oq_pi != oq_ci)
break;
if (time_after(jiffies, timeout)) {
@@ -3545,7 +3560,7 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
DIV_ROUND_UP(iu_length,
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
- iq_ci = *queue_group->iq_ci[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
ctrl_info->num_elements_per_iq))
@@ -3621,29 +3636,24 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_submit_raid_request_synchronous_with_io_request(
- struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
- unsigned long timeout_msecs)
+static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
+ *error_info)
{
- int rc = 0;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int rc = -EIO;
- io_request->io_complete_callback = pqi_raid_synchronous_complete;
- io_request->context = &wait;
-
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
- io_request);
-
- if (timeout_msecs == NO_TIMEOUT) {
- pqi_wait_for_completion_io(ctrl_info, &wait);
- } else {
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(timeout_msecs))) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "command timed out\n");
- rc = -ETIMEDOUT;
- }
+ switch (error_info->data_out_result) {
+ case PQI_DATA_IN_OUT_GOOD:
+ if (error_info->status == SAM_STAT_GOOD)
+ rc = 0;
+ break;
+ case PQI_DATA_IN_OUT_UNDERFLOW:
+ if (error_info->status == SAM_STAT_GOOD ||
+ error_info->status == SAM_STAT_CHECK_CONDITION)
+ rc = 0;
+ break;
+ case PQI_DATA_IN_OUT_ABORTED:
+ rc = PQI_CMD_STATUS_ABORTED;
+ break;
}
return rc;
@@ -3653,11 +3663,12 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
- int rc;
+ int rc = 0;
struct pqi_io_request *io_request;
unsigned long start_jiffies;
unsigned long msecs_blocked;
size_t iu_length;
+ DECLARE_COMPLETION_ONSTACK(wait);
/*
* Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
@@ -3686,11 +3697,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_ctrl_busy(ctrl_info);
timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
if (timeout_msecs == 0) {
+ pqi_ctrl_unbusy(ctrl_info);
rc = -ETIMEDOUT;
goto out;
}
if (pqi_ctrl_offline(ctrl_info)) {
+ pqi_ctrl_unbusy(ctrl_info);
rc = -ENXIO;
goto out;
}
@@ -3708,8 +3721,25 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
PQI_REQUEST_HEADER_LENGTH;
memcpy(io_request->iu, request, iu_length);
- rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
- io_request, timeout_msecs);
+ io_request->io_complete_callback = pqi_raid_synchronous_complete;
+ io_request->context = &wait;
+
+ pqi_start_io(ctrl_info,
+ &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ io_request);
+
+ pqi_ctrl_unbusy(ctrl_info);
+
+ if (timeout_msecs == NO_TIMEOUT) {
+ pqi_wait_for_completion_io(ctrl_info, &wait);
+ } else {
+ if (!wait_for_completion_io_timeout(&wait,
+ msecs_to_jiffies(timeout_msecs))) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "command timed out\n");
+ rc = -ETIMEDOUT;
+ }
+ }
if (error_info) {
if (io_request->error_info)
@@ -3718,25 +3748,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
else
memset(error_info, 0, sizeof(*error_info));
} else if (rc == 0 && io_request->error_info) {
- u8 scsi_status;
- struct pqi_raid_error_info *raid_error_info;
-
- raid_error_info = io_request->error_info;
- scsi_status = raid_error_info->status;
-
- if (scsi_status == SAM_STAT_CHECK_CONDITION &&
- raid_error_info->data_out_result ==
- PQI_DATA_IN_OUT_UNDERFLOW)
- scsi_status = SAM_STAT_GOOD;
-
- if (scsi_status != SAM_STAT_GOOD)
- rc = -EIO;
+ rc = pqi_process_raid_io_error_synchronous(
+ io_request->error_info);
}
pqi_free_io_request(io_request);
out:
- pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
@@ -5041,7 +5059,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
iq_pi = queue_group->iq_pi_copy[path];
while (1) {
- iq_ci = *queue_group->iq_ci[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
if (iq_ci == iq_pi)
break;
pqi_check_ctrl_health(ctrl_info);
@@ -6230,20 +6248,20 @@ static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
admin_queues = &ctrl_info->admin_queues;
admin_queues->iq_pi_copy = 0;
admin_queues->oq_ci_copy = 0;
- *admin_queues->oq_pi = 0;
+ writel(0, admin_queues->oq_pi);
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
ctrl_info->queue_groups[i].oq_ci_copy = 0;
- *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
- *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
- *ctrl_info->queue_groups[i].oq_pi = 0;
+ writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
+ writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
+ writel(0, ctrl_info->queue_groups[i].oq_pi);
}
event_queue = &ctrl_info->event_queue;
- *event_queue->oq_pi = 0;
+ writel(0, event_queue->oq_pi);
event_queue->oq_ci_copy = 0;
}
@@ -6826,6 +6844,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0110)
},
{
@@ -6950,6 +6980,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADVANTECH, 0x8312)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_DELL, 0x1fe0)
},
{
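
The smartpqi hunks above annotate the firmware-owned queue index pointers as __iomem and convert every access to readl()/writel(). A minimal sketch of the resulting access pattern, using u32 in place of pqi_index_t and hypothetical helper names:

static bool example_oq_has_work(u32 __iomem *oq_pi, u32 oq_ci)
{
	/* the producer index is read via an MMIO accessor, never dereferenced */
	return readl(oq_pi) != oq_ci;
}

static void example_reset_oq(u32 __iomem *oq_pi)
{
	writel(0, oq_pi);	/* mirrors pqi_reinit_queues() above */
}
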
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 269ddf791a73..0abe17c1a73b 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -200,7 +200,7 @@ snic_stats_show(struct seq_file *sfp, void *data)
{
struct snic *snic = (struct snic *) sfp->private;
struct snic_stats *stats = &snic->s_stats;
- struct timespec last_isr_tms, last_ack_tms;
+ struct timespec64 last_isr_tms, last_ack_tms;
u64 maxio_tm;
int i;
@@ -312,12 +312,12 @@ snic_stats_show(struct seq_file *sfp, void *data)
"\t\t Other Statistics\n"
"\n---------------------------------------------\n");
- jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
- jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);
+ jiffies_to_timespec64(stats->misc.last_isr_time, &last_isr_tms);
+ jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms);
seq_printf(sfp,
- "Last ISR Time : %llu (%8lu.%8lu)\n"
- "Last Ack Time : %llu (%8lu.%8lu)\n"
+ "Last ISR Time : %llu (%8llu.%09lu)\n"
+ "Last Ack Time : %llu (%8llu.%09lu)\n"
"Ack ISRs : %llu\n"
"IO Cmpl ISRs : %llu\n"
"Err Notify ISRs : %llu\n"
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index f00ebf4717e0..fc60c933d6c0 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -65,12 +65,12 @@ static int
snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
{
int len = 0;
- struct timespec tmspec;
+ struct timespec64 tmspec;
- jiffies_to_timespec(td->ts, &tmspec);
+ jiffies_to_timespec64(td->ts, &tmspec);
len += snprintf(buf, buf_sz,
- "%lu.%10lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
+ "%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
tmspec.tv_sec,
tmspec.tv_nsec,
td->fn,
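
The snic hunks above convert the debugfs timestamps from timespec to the year-2038-safe timespec64, which also changes the tv_sec format specifier to %llu. A minimal sketch of the same conversion with a hypothetical helper:

static int example_format_ts(char *buf, size_t len, unsigned long ts_jiffies)
{
	struct timespec64 ts;

	jiffies_to_timespec64(ts_jiffies, &ts);
	/* tv_sec is 64-bit (%llu); tv_nsec stays a long (%09lu) */
	return scnprintf(buf, len, "%llu.%09lu",
			 (unsigned long long)ts.tv_sec, ts.tv_nsec);
}
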
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 50c66ccc4b41..307df2fa39a3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -828,11 +828,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
static int flush_buffer(struct scsi_tape *STp, int seek_next)
{
int backspace, result;
- struct st_buffer *STbuffer;
struct st_partstat *STps;
- STbuffer = STp->buffer;
-
/*
* If there was a bus reset, block further access
* to this device.
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index 190770bdc194..91db17727963 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -295,10 +295,8 @@ static void
sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw)
{
struct sym_fw1a_scr *scripta0;
- struct sym_fw1b_scr *scriptb0;
scripta0 = (struct sym_fw1a_scr *) np->scripta0;
- scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
/*
* Fill variable parts in scripts.
@@ -319,10 +317,8 @@ static void
sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw)
{
struct sym_fw2a_scr *scripta0;
- struct sym_fw2b_scr *scriptb0;
scripta0 = (struct sym_fw2a_scr *) np->scripta0;
- scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
/*
* Fill variable parts in scripts.
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 7320d5fe4cbc..5f10aa9bad9b 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -252,7 +252,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
scsi_set_resid(cmd, resid);
- cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
+ cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status;
}
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index 805369521df8..e34801ae5d69 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -256,7 +256,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd)
static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
{
scsi_set_resid(cmd, resid);
- cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
+ cmd->result = (DID_OK << 16) | (cp->ssss_status & 0x7f);
}
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 378af306fda1..bd3f6e2d6834 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3855,7 +3855,7 @@ out_reject:
int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
{
- int dp_sg, dp_sgmin, resid = 0;
+ int dp_sg, resid = 0;
int dp_ofs = 0;
/*
@@ -3902,7 +3902,6 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
* We are now full comfortable in the computation
* of the data residual (2's complement).
*/
- dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
resid = -cp->ext_ofs;
for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e27b4d4e6ae2..e09fe6ab3572 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -100,3 +100,12 @@ config SCSI_UFS_QCOM
Select this if you have UFS controller on QCOM chipset.
If unsure, say N.
+
+config SCSI_UFS_HISI
+ tristate "Hisilicon specific hooks to UFS controller platform driver"
+ depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ ---help---
+ This selects the Hisilicon specific additions to UFSHCD platform driver.
+
+ Select this if you have UFS controller on Hisilicon chipset.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 918f5791202d..2c50f03d8c4a 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-objs := ufshcd.o ufs-sysfs.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
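
With the Kconfig entry and the Makefile rule above in place, the HiSilicon glue builds as ufs-hisi.o on top of the generic UFSHCD platform driver. A hedged illustration of the resulting config selection (fragment only; the real dependencies are exactly those spelled out in the Kconfig entry):

CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_HISI=m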
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
new file mode 100644
index 000000000000..46df707e6f2c
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -0,0 +1,619 @@
+/*
+ * HiSilicon Hixxxx UFS Driver
+ *
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "ufshcd.h"
+#include "ufshcd-pltfrm.h"
+#include "unipro.h"
+#include "ufs-hisi.h"
+#include "ufshci.h"
+
+static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 tx_fsm_val_0 = 0;
+ u32 tx_fsm_val_1 = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
+ tx_fsm_val_1 == TX_FSM_HIBERN8))
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ /*
+ * we might have scheduled out for long during polling so
+ * check the state again.
+ */
+ if (time_after(jiffies, timeout)) {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ }
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
+ tx_fsm_val_1 != TX_FSM_HIBERN8) {
+ err = -1;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
+ __func__, tx_fsm_val_0, tx_fsm_val_1);
+ }
+
+ return err;
+}
+
+static void ufs_hi3660_clk_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
+ mdelay(1);
+ /* use abb clk */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
+ /* open mphy ref clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+}
+
+static void ufs_hi3660_soc_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+ u32 reg;
+
+ if (!IS_ERR(host->rst))
+ reset_control_assert(host->rst);
+
+ /* HC_PSW powerup */
+ ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
+ udelay(10);
+ /* notify PWR ready */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
+ UFS_DEVICE_RESET_CTRL);
+
+ reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
+ reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
+ /* set cfg clk freq */
+ ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
+ /* set ref clk freq */
+ ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
+ /* bypass ufs clk gate */
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
+
+ /* open psw clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
+ /* disable ufshc iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
+ /* disable phy iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
+ /* notice iso disable */
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
+
+ /* disable lp_reset_n */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
+ mdelay(1);
+
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
+ UFS_DEVICE_RESET_CTRL);
+
+ msleep(20);
+
+ /*
+ * enable the fix of linereset recovery,
+ * and enable rx_reset/tx_rest beat
+ * enable ref_clk_en override(bit5) &
+ * override value = 1(bit4), with mask
+ */
+ ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
+
+ if (!IS_ERR(host->rst))
+ reset_control_deassert(host->rst);
+}
+
+static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
+{
+ int err;
+ uint32_t value;
+ uint32_t reg;
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
+ /* PA_HSSeries */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
+ /* MPHY CBRATESEL */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
+ /* MPHY CBOVRCTRL2 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
+ /* MPHY CBOVRCTRL3 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* MPHY RXOVRCTRL4 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
+ /* MPHY RXOVRCTRL4 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
+ /* MPHY RXOVRCTRL5 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
+ /* MPHY RXOVRCTRL5 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
+ /* MPHY RXSQCONTROL rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+ /* MPHY RXSQCONTROL rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
+ if (value != 0x1)
+ dev_info(hba->dev,
+ "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
+ err = ufs_hisi_check_hibern8(hba);
+ if (err)
+ dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
+
+ ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+
+ /* disable auto H8 */
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ reg = reg & (~UFS_AHIT_AH8ITV_MASK);
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* Unipro PA_Local_TX_LCC_Enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
+ /* close Unipro VS_Mk2ExtnSupport */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
+ if (value != 0) {
+ /* Ensure close success */
+ dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
+ }
+
+ return err;
+}
+
+static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ /* Unipro DL_AFC0CreditThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
+ /* Unipro DL_TC0OutAckThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
+ /* Unipro DL_TC0TXFCThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
+
+ /* not bypass ufs clk gate */
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
+ UFS_SYSCTRL);
+
+ /* select received symbol cnt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
+ /* reset counter0 and enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
+
+ return 0;
+}
+
+static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_hisi_link_startup_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ err = ufs_hisi_link_startup_post_change(hba);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+struct ufs_hisi_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+static int ufs_hisi_get_pwr_dev_param(
+ struct ufs_hisi_dev_params *hisi_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_hisi_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_hisi_max_hs = false;
+
+ if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (hisi_param->desired_working_mode == FAST) {
+ is_hisi_max_hs = true;
+ min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
+ hisi_param->hs_tx_gear);
+ } else {
+ min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
+ hisi_param->pwm_tx_gear);
+ }
+
+ /*
+ * device doesn't support HS but
+ * hisi_param->desired_working_mode is HS,
+ * thus device and hisi_param don't agree
+ */
+ if (!is_dev_sup_hs && is_hisi_max_hs) {
+ pr_err("%s: device not support HS\n", __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_hisi_max_hs) {
+ /*
+ * since device supports HS, it supports FAST_MODE.
+ * since hisi_param->desired_working_mode is also HS
+ * then final decision (FAST/FASTAUTO) is done according
+ * to hisi_params as it is the restricting factor
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hisi_param->rx_pwr_hs;
+ } else {
+ /*
+ * here hisi_param->desired_working_mode is PWM.
+ * it doesn't matter whether device supports HS or PWM,
+ * in both cases hisi_param->desired_working_mode will
+ * determine the mode
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hisi_param->rx_pwr_pwm;
+ }
+
+ /*
+ * we would like tx to work in the minimum number of lanes
+ * between device capability and vendor preferences.
+ * the same decision will be made for rx
+ */
+ agreed_pwr->lane_tx =
+ min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
+ agreed_pwr->lane_rx =
+ min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+ /*
+ * if both device capabilities and vendor pre-defined preferences are
+ * both HS or both PWM then set the minimum gear to be the chosen
+ * working gear.
+ * if one is PWM and one is HS then the one that is PWM get to decide
+ * what is the gear, as it is the one that also decided previously what
+ * pwr the device will be configured to.
+ */
+ if ((is_dev_sup_hs && is_hisi_max_hs) ||
+ (!is_dev_sup_hs && !is_hisi_max_hs))
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx =
+ min_t(u32, min_dev_gear, min_hisi_gear);
+ else
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
+
+ agreed_pwr->hs_rate = hisi_param->hs_rate;
+
+ pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
+ agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
+ agreed_pwr->hs_rate);
+ return 0;
+}
+
+static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
+{
+ hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
+ hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
+ hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX;
+ hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX;
+ hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX;
+ hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX;
+ hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM;
+ hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM;
+ hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS;
+ hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS;
+ hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE;
+ hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE;
+}
+
+static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
+{
+ /* update */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
+ /* PA_TxSkip */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
+ /*PA_PWRModeUserData0 = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+ /*PA_PWRModeUserData1 = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+ /*PA_PWRModeUserData2 = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+ /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+ /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+ /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+ /*PA_PWRModeUserData3 = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+ /*PA_PWRModeUserData4 = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+ /*PA_PWRModeUserData5 = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+ /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+ /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+ /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+}
+
+static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_hisi_dev_params ufs_hisi_cap;
+ int ret = 0;
+
+ if (!dev_req_params) {
+ dev_err(hba->dev,
+ "%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_hisi_set_dev_cap(&ufs_hisi_cap);
+ ret = ufs_hisi_get_pwr_dev_param(
+ &ufs_hisi_cap, dev_max_params, dev_req_params);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: failed to determine capabilities\n", __func__);
+ goto out;
+ }
+
+ ufs_hisi_pwr_change_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_is_runtime_pm(pm_op))
+ return 0;
+
+ if (host->in_suspend) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ udelay(10);
+ /* set ref_dig_clk override of PHY PCS to 0 */
+ ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
+
+ host->in_suspend = true;
+
+ return 0;
+}
+
+static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (!host->in_suspend)
+ return 0;
+
+ /* set ref_dig_clk override of PHY PCS to 1 */
+ ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
+ udelay(10);
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+
+ host->in_suspend = false;
+ return 0;
+}
+
+static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
+{
+ struct resource *mem_res;
+ struct device *dev = host->hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* get resource of ufs sys ctrl */
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(host->ufs_sys_ctrl))
+ return PTR_ERR(host->ufs_sys_ctrl);
+
+ return 0;
+}
+
+static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_1;
+ hba->spm_lvl = UFS_PM_LVL_3;
+}
+
+/**
+ * ufs_hisi_init_common
+ * @hba: host controller instance
+ */
+static int ufs_hisi_init_common(struct ufs_hba *hba)
+{
+ int err = 0;
+ struct device *dev = hba->dev;
+ struct ufs_hisi_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ host->rst = devm_reset_control_get(dev, "rst");
+
+ ufs_hisi_set_pm_lvl(hba);
+
+ err = ufs_hisi_get_resource(host);
+ if (err) {
+ ufshcd_set_variant(hba, NULL);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ufs_hi3660_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+
+ ret = ufs_hisi_init_common(hba);
+ if (ret) {
+ dev_err(dev, "%s: ufs common init fail\n", __func__);
+ return ret;
+ }
+
+ ufs_hi3660_clk_init(hba);
+
+ ufs_hi3660_soc_init(hba);
+
+ return 0;
+}
+
+static struct ufs_hba_variant_ops ufs_hba_hisi_vops = {
+ .name = "hi3660",
+ .init = ufs_hi3660_init,
+ .link_startup_notify = ufs_hi3660_link_startup_notify,
+ .pwr_change_notify = ufs_hi3660_pwr_change_notify,
+ .suspend = ufs_hisi_suspend,
+ .resume = ufs_hisi_resume,
+};
+
+static int ufs_hisi_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops);
+}
+
+static int ufs_hisi_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id ufs_hisi_of_match[] = {
+ { .compatible = "hisilicon,hi3660-ufs" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
+
+static const struct dev_pm_ops ufs_hisi_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufs_hisi_pltform = {
+ .probe = ufs_hisi_probe,
+ .remove = ufs_hisi_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-hisi",
+ .pm = &ufs_hisi_pm_ops,
+ .of_match_table = of_match_ptr(ufs_hisi_of_match),
+ },
+};
+module_platform_driver(ufs_hisi_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ufshcd-hisi");
+MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
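
ufs_hisi_check_hibern8() above uses a polling idiom that is easy to get wrong: poll with a short sleep until a deadline, then sample the state once more after the deadline, because the task may have been scheduled out between its last successful read and the time check, and a plain loop would report a spurious timeout. A stripped-down sketch of the same idiom, with hypothetical device and readiness helpers standing in for the two ufshcd_dme_get() calls:

/* Sketch only: hw_ready() and struct my_dev are placeholders, not part of this patch. */
static int poll_until_ready(struct my_dev *dev, unsigned long timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (hw_ready(dev))
			return 0;
		usleep_range(100, 200);		/* sleep at most ~200us per iteration */
	} while (time_before(jiffies, timeout));

	/* we may have slept well past the deadline: take one last sample */
	return hw_ready(dev) ? 0 : -ETIMEDOUT;
}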
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
new file mode 100644
index 000000000000..3df9cd7acc29
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hisi.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, HiSilicon. All rights reserved.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef UFS_HISI_H_
+#define UFS_HISI_H_
+
+#define HBRN8_POLL_TOUT_MS 1000
+
+/*
+ * ufs sysctrl specific define
+ */
+#define PSW_POWER_CTRL (0x04)
+#define PHY_ISO_EN (0x08)
+#define HC_LP_CTRL (0x0C)
+#define PHY_CLK_CTRL (0x10)
+#define PSW_CLK_CTRL (0x14)
+#define CLOCK_GATE_BYPASS (0x18)
+#define RESET_CTRL_EN (0x1C)
+#define UFS_SYSCTRL (0x5C)
+#define UFS_DEVICE_RESET_CTRL (0x60)
+
+#define BIT_UFS_PSW_ISO_CTRL (1 << 16)
+#define BIT_UFS_PSW_MTCMOS_EN (1 << 0)
+#define BIT_UFS_REFCLK_ISO_EN (1 << 16)
+#define BIT_UFS_PHY_ISO_CTRL (1 << 0)
+#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16)
+#define BIT_SYSCTRL_PWR_READY (1 << 8)
+#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24)
+#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8)
+#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF)
+#define UFS_FREQ_CFG_CLK (0x39)
+#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4)
+#define MASK_UFS_CLK_GATE_BYPASS (0x3F)
+#define BIT_SYSCTRL_LP_RESET_N (1 << 0)
+#define BIT_UFS_REFCLK_SRC_SEl (1 << 0)
+#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16)
+#define MASK_UFS_DEVICE_RESET (0x1 << 16)
+#define BIT_UFS_DEVICE_RESET (0x1)
+
+/*
+ * M-TX Configuration Attributes for Hixxxx
+ */
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_HIBERN8 0x1
+
+/*
+ * Hixxxx UFS HC specific Registers
+ */
+enum {
+ UFS_REG_OCPTHRTL = 0xc0,
+ UFS_REG_OOCPR = 0xc4,
+
+ UFS_REG_CDACFG = 0xd0,
+ UFS_REG_CDATX1 = 0xd4,
+ UFS_REG_CDATX2 = 0xd8,
+ UFS_REG_CDARX1 = 0xdc,
+ UFS_REG_CDARX2 = 0xe0,
+ UFS_REG_CDASTA = 0xe4,
+
+ UFS_REG_LBMCFG = 0xf0,
+ UFS_REG_LBMSTA = 0xf4,
+ UFS_REG_UFSMODE = 0xf8,
+
+ UFS_REG_HCLKDIV = 0xfc,
+};
+
+/* AHIT - Auto-Hibernate Idle Timer */
+#define UFS_AHIT_AH8ITV_MASK 0x3FF
+
+/* REG UFS_REG_OCPTHRTL definition */
+#define UFS_HCLKDIV_NORMAL_VALUE 0xE4
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_HISI_LIMIT_NUM_LANES_RX 2
+#define UFS_HISI_LIMIT_NUM_LANES_TX 2
+#define UFS_HISI_LIMIT_HSGEAR_RX UFS_HS_G3
+#define UFS_HISI_LIMIT_HSGEAR_TX UFS_HS_G3
+#define UFS_HISI_LIMIT_PWMGEAR_RX UFS_PWM_G4
+#define UFS_HISI_LIMIT_PWMGEAR_TX UFS_PWM_G4
+#define UFS_HISI_LIMIT_RX_PWR_PWM SLOW_MODE
+#define UFS_HISI_LIMIT_TX_PWR_PWM SLOW_MODE
+#define UFS_HISI_LIMIT_RX_PWR_HS FAST_MODE
+#define UFS_HISI_LIMIT_TX_PWR_HS FAST_MODE
+#define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B
+#define UFS_HISI_LIMIT_DESIRED_MODE FAST
+
+struct ufs_hisi_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_sys_ctrl;
+
+ struct reset_control *rst;
+
+ uint64_t caps;
+
+ bool in_suspend;
+};
+
+#define ufs_sys_ctrl_writel(host, val, reg) \
+ writel((val), (host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_set_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel( \
+ (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel((host), \
+ ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
+ (reg))
+#endif /* UFS_HISI_H_ */
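
The ufs_sys_ctrl_set_bits()/_clr_bits() macros above are plain read-modify-write accessors over the second MMIO region mapped in ufs_hisi_get_resource(). Written out as functions, the behaviour is simply this (a sketch, equivalent to the macros; the function names are invented):

static inline void ufs_sys_ctrl_rmw_set(struct ufs_hisi_host *host, u32 mask, u32 reg)
{
	writel(readl(host->ufs_sys_ctrl + reg) | mask, host->ufs_sys_ctrl + reg);
}

static inline void ufs_sys_ctrl_rmw_clr(struct ufs_hisi_host *host, u32 mask, u32 reg)
{
	writel(readl(host->ufs_sys_ctrl + reg) & ~mask, host->ufs_sys_ctrl + reg);
}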
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 221820a7c78b..75ee5906b966 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -50,19 +50,10 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
-static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
- char *prefix)
-{
- print_hex_dump(KERN_ERR, prefix,
- len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
- 16, 4, (void __force *)hba->mmio_base + offset,
- len * 4, false);
-}
-
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- char *prefix, void *priv)
+ const char *prefix, void *priv)
{
- ufs_qcom_dump_regs(hba, offset, len, prefix);
+ ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
@@ -1431,7 +1422,7 @@ out:
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, char *str, void *priv))
+ int offset, int num_regs, const char *str, void *priv))
{
u32 reg;
struct ufs_qcom_host *host;
@@ -1613,7 +1604,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
- ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
}
static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
@@ -1639,8 +1630,8 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
- ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
- "HCI Vendor Specific Registers ");
+ ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
+ "HCI Vendor Specific Registers ");
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
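
One detail worth calling out in this conversion: the removed ufs_qcom_dump_regs() took its length in 32-bit registers, while ufshcd_dump_regs() takes bytes, which is why every converted call site above multiplies by 4. Side by side, for the sixteen-register vendor block:

/* old: len counted 32-bit registers */
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, "HCI Vendor Specific Registers ");
/* new: len counted in bytes */
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, "HCI Vendor Specific Registers ");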
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 397081d320b1..9d5d2ca7fc4f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -99,8 +99,29 @@
_ret; \
})
-#define ufshcd_hex_dump(prefix_str, buf, len) \
-print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+#define ufshcd_hex_dump(prefix_str, buf, len) do { \
+ size_t __len = (len); \
+ print_hex_dump(KERN_ERR, prefix_str, \
+ __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
+ 16, 4, buf, __len, false); \
+} while (0)
+
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix)
+{
+ u8 *regs;
+
+ regs = kzalloc(len, GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ memcpy_fromio(regs, hba->mmio_base + offset, len);
+ ufshcd_hex_dump(prefix, regs, len);
+ kfree(regs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
@@ -321,18 +342,19 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
sector_t lba = -1;
u8 opcode = 0;
u32 intr, doorbell;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int transfer_len = -1;
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
-
- if (!trace_ufshcd_command_enabled())
+ if (!trace_ufshcd_command_enabled()) {
+ /* trace UPIU W/O tracing command */
+ if (lrbp->cmd)
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
return;
-
- lrbp = &hba->lrb[tag];
+ }
if (lrbp->cmd) { /* data phase exists */
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
opcode = (u8)(*lrbp->cmd->cmnd);
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
@@ -386,15 +408,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
- /*
- * hex_dump reads its data without the readl macro. This might
- * cause inconsistency issues on some platform, as the printed
- * values may be from cache and not the most recent value.
- * To know whether you are looking at an un-cached version verify
- * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
- * during platform/pci probe function.
- */
- ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
hba->ufs_version, hba->capabilities);
dev_err(hba->dev,
@@ -7290,7 +7304,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (driver_byte(ret) & DRIVER_SENSE)
+ if (driver_byte(ret) == DRIVER_SENSE)
scsi_print_sense_hdr(sdp, NULL, &sshdr);
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f51758f1e5cc..33fdd3f281ae 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1043,4 +1043,7 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix);
+
#endif /* End of Header */
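
The ufshcd_dump_regs() helper declared here snapshots the MMIO window with memcpy_fromio() into a temporary buffer before printing, so every printed value comes from an uncached read of the hardware rather than from whatever the old hex dump happened to fetch; it can also fail with -ENOMEM. A hedged call-site sketch (the error handling is added for illustration; the in-tree callers simply ignore the return value):

	/* offset and len are in bytes; len should be a multiple of 4 */
	if (ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "))
		dev_err(hba->dev, "host register dump skipped (no memory)\n");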
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0ebc4818e132..95d0a22b2ad6 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1090,10 +1090,8 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = {
/* Start items for lio_target_tiqn_cit */
-static struct se_portal_group *lio_target_tiqn_addtpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn,
+ const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 99501785cdc1..923b1a9fc3dc 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -369,7 +369,7 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
- sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4b34f71547c6..101d62105c93 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -636,8 +636,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
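
The strlcat() change above fixes a classic strncat() misuse: the third argument of strncat() limits how many bytes are appended from the source, not how much room is left in the destination, so passing strlen() of the source makes the bound a no-op and the destination can still overflow exactly as with plain strcat(). strlcat() instead takes the total size of the destination buffer and guarantees NUL termination. In sketch form, using the local buf1 array and the NONE keyword from this function:

	/* wrong: the bound equals the source length, so it never limits anything */
	strncat(buf1, ",", strlen(","));
	strncat(buf1, NONE, strlen(NONE));

	/* right: bounded by the destination's total size, always NUL terminated */
	strlcat(buf1, "," NONE, sizeof(buf1));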
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 4435bf374d2d..49be1e41290c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <linux/list.h>
-#include <linux/percpu_ida.h>
+#include <linux/sched/signal.h>
#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
@@ -147,6 +147,30 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->r2t_lock);
}
+static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
+{
+ int tag = -1;
+ DEFINE_WAIT(wait);
+ struct sbq_wait_state *ws;
+
+ if (state == TASK_RUNNING)
+ return tag;
+
+ ws = &se_sess->sess_tag_pool.ws[0];
+ for (;;) {
+ prepare_to_wait_exclusive(&ws->wait, &wait, state);
+ if (signal_pending_state(state, current))
+ break;
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ finish_wait(&ws->wait, &wait);
+ return tag;
+}
+
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
@@ -155,9 +179,11 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
- int size, tag;
+ int size, tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ tag = iscsit_wait_for_tag(se_sess, state, &cpu);
if (tag < 0)
return NULL;
@@ -166,6 +192,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
memset(cmd, 0, size);
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->conn = conn;
cmd->data_direction = DMA_NONE;
INIT_LIST_HEAD(&cmd->i_conn_node);
@@ -711,7 +738,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(sess->se_sess, se_cmd);
}
EXPORT_SYMBOL(iscsit_release_cmd);
@@ -1026,26 +1053,8 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
- /*
- * NOPIN timeout is disabled..
- */
- if (!na->nopin_timeout)
- return;
-
spin_lock_bh(&conn->nopin_timer_lock);
- if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
- spin_unlock_bh(&conn->nopin_timer_lock);
- return;
- }
-
- conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
- conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
-
- pr_debug("Started NOPIN Timer on CID: %d at %u second"
- " interval\n", conn->cid, na->nopin_timeout);
+ __iscsit_start_nopin_timer(conn);
spin_unlock_bh(&conn->nopin_timer_lock);
}
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 60d5b918c4ac..bc8918f382e4 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -239,10 +239,7 @@ out:
return ret;
release:
- if (se_cmd)
- transport_generic_free_cmd(se_cmd, 0);
- else
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
goto out;
}
@@ -768,7 +765,7 @@ static int tcm_loop_make_nexus(
if (!tl_nexus)
return -ENOMEM;
- tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+ tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
name, tl_nexus, tcm_loop_alloc_sess_cb);
if (IS_ERR(tl_nexus->se_sess)) {
@@ -808,7 +805,7 @@ static int tcm_loop_drop_nexus(
/*
* Release the SCSI I_T Nexus to the emulated Target Port
*/
- transport_deregister_session(tl_nexus->se_sess);
+ target_remove_session(se_sess);
tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
@@ -983,10 +980,8 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
/* Start items for tcm_loop_naa_cit */
-static struct se_portal_group *tcm_loop_make_naa_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index fb1003921d85..3d10189ecedc 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -209,7 +209,7 @@ static struct sbp_session *sbp_session_create(
INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
sess->guid = guid;
- sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+ sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
sizeof(struct sbp_target_request),
TARGET_PROT_NORMAL, guid_str,
sess, NULL);
@@ -235,8 +235,7 @@ static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
if (cancel_work)
cancel_delayed_work_sync(&sess->maint_work);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
if (sess->card)
fw_card_put(sess->card);
@@ -926,15 +925,16 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
{
struct se_session *se_sess = sess->se_sess;
struct sbp_target_request *req;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
req->se_cmd.tag = next_orb;
return req;
@@ -1460,7 +1460,7 @@ static void sbp_free_request(struct sbp_target_request *req)
kfree(req->pg_tbl);
kfree(req->cmd_buf);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static void sbp_mgt_agent_process(struct work_struct *work)
@@ -2005,10 +2005,8 @@ static void sbp_pre_unlink_lun(
pr_err("unlink LUN: failed to update unit directory\n");
}
-static struct se_portal_group *sbp_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct sbp_tport *tport =
container_of(wwn, struct sbp_tport, tport_wwn);
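
Both the iSCSI and SBP conversions above follow the same pattern for the percpu_ida to sbitmap_queue switch: sbitmap_queue_get() is non-blocking and returns a CPU hint that must be stored in se_cmd.map_cpu so target_free_tag() can return the tag to the right per-CPU cache, and callers that are allowed to sleep provide their own wait loop (iscsit_wait_for_tag() above). Condensed to the allocate/use/free pairing, mirroring sbp_mgt_get_req():

	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return ERR_PTR(-ENOMEM);	/* or sleep, as iscsit_wait_for_tag() does */

	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;		/* consumed later by target_free_tag() */

	/* ... command lifetime ... */

	target_free_tag(se_sess, &req->se_cmd);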
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 5ccef7d597fa..f6b1549f4142 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -263,8 +263,8 @@ static struct config_group *target_core_register_fabric(
&tf->tf_discovery_cit);
configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
- pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
- " %s\n", tf->tf_group.cg_item.ci_name);
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
+ config_item_name(&tf->tf_group.cg_item));
return &tf->tf_group;
}
@@ -810,7 +810,7 @@ static ssize_t pi_prot_type_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection requires device to be configured\n");
return -ENODEV;
}
@@ -859,7 +859,7 @@ static ssize_t pi_prot_format_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection format requires device to be configured\n");
return -ENODEV;
}
@@ -1948,7 +1948,7 @@ static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
- return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+ return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}
static ssize_t target_dev_enable_store(struct config_item *item,
@@ -2473,7 +2473,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("Unable to set alua_access_state while device is"
" not configured\n");
return -ENODEV;
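
Several hunks above replace open-coded tests of dev->dev_flags & DF_CONFIGURED with target_dev_configured(). The helper itself is not shown in this excerpt; presumably it is a trivial inline along these lines (a sketch based purely on the flag it replaces):

static inline bool target_dev_configured(struct se_device *dev)
{
	return !!(dev->dev_flags & DF_CONFIGURED);
}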
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e27db4d45a9d..47b5ef153135 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -336,7 +336,6 @@ int core_enable_device_list_for_node(
return -ENOMEM;
}
- atomic_set(&new->ua_count, 0);
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
@@ -879,39 +878,21 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
}
EXPORT_SYMBOL(target_to_linux_sector);
-/**
- * target_find_device - find a se_device by its dev_index
- * @id: dev_index
- * @do_depend: true if caller needs target_depend_item to be done
- *
- * If do_depend is true, the caller must do a target_undepend_item
- * when finished using the device.
- *
- * If do_depend is false, the caller must be called in a configfs
- * callback or during removal.
- */
-struct se_device *target_find_device(int id, bool do_depend)
-{
- struct se_device *dev;
-
- mutex_lock(&device_mutex);
- dev = idr_find(&devices_idr, id);
- if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
- dev = NULL;
- mutex_unlock(&device_mutex);
- return dev;
-}
-EXPORT_SYMBOL(target_find_device);
-
struct devices_idr_iter {
+ struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
static int target_devices_idr_iter(int id, void *p, void *data)
+ __must_hold(&device_mutex)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ int ret;
+
+ config_item_put(iter->prev_item);
+ iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
@@ -919,10 +900,18 @@ static int target_devices_idr_iter(int id, void *p, void *data)
* to allow other callers to access partially setup devices,
* so we skip them here.
*/
- if (!(dev->dev_flags & DF_CONFIGURED))
+ if (!target_dev_configured(dev))
+ return 0;
+
+ iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!iter->prev_item)
return 0;
+ mutex_unlock(&device_mutex);
+
+ ret = iter->fn(dev, iter->data);
- return iter->fn(dev, iter->data);
+ mutex_lock(&device_mutex);
+ return ret;
}
/**
@@ -936,15 +925,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data)
{
- struct devices_idr_iter iter;
+ struct devices_idr_iter iter = { .fn = fn, .data = data };
int ret;
- iter.fn = fn;
- iter.data = data;
-
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
+ config_item_put(iter.prev_item);
return ret;
}
@@ -953,7 +940,7 @@ int target_configure_device(struct se_device *dev)
struct se_hba *hba = dev->se_hba;
int ret, id;
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
@@ -1058,7 +1045,7 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
destroy_workqueue(dev->tmr_wq);
dev->transport->destroy_device(dev);
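
target_for_each_device() keeps its signature, but the reworked target_devices_idr_iter() now pins each device's configfs item with config_item_get_unless_zero() and drops device_mutex around the callback, so the callback may sleep; the reference held across an early exit is released by the extra config_item_put() after idr_for_each(). A minimal hedged usage sketch (the callback name is made up):

static int count_configured(struct se_device *dev, void *data)
{
	(*(unsigned int *)data)++;
	return 0;			/* a non-zero return would stop the walk */
}

/* caller: only devices that have reached DF_CONFIGURED are visited */
unsigned int count = 0;

target_for_each_device(count_configured, &count);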
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e1416b007aa4..aa2f4f632ebe 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -34,6 +34,7 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
@@ -642,7 +643,7 @@ static int target_fabric_port_link(
}
dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("se_device not configured yet, cannot port link\n");
return -ENODEV;
}
@@ -841,7 +842,7 @@ static struct config_group *target_fabric_make_tpg(
return ERR_PTR(-ENOSYS);
}
- se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
+ se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dead30b1d32c..0c6635587930 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,7 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-int transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index b054682e974f..ebac2b49b9c6 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -978,9 +978,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
}
case COMPARE_AND_WRITE:
if (!dev->dev_attrib.emulate_caw) {
- pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
- " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
- dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
+ pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
+ dev->se_hba->backend->ops->name,
+ config_item_name(&dev->dev_group.cg_item),
+ dev->t10_wwn.unit_serial);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
sectors = cdb[13];
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 9c7bc1ca341a..6d1179a7f043 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,25 +75,6 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
kfree(tmr);
}
-static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
-{
- unsigned long flags;
- bool remove = true, send_tas;
- /*
- * TASK ABORTED status (TAS) bit support
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- send_tas = (cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- if (send_tas) {
- remove = false;
- transport_send_task_abort(cmd);
- }
-
- return transport_cmd_finish_abort(cmd, remove);
-}
-
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
@@ -142,7 +123,7 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
return false;
}
}
- if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
spin_unlock(&se_cmd->t_state_lock);
@@ -187,13 +168,12 @@ void core_tmr_abort_task(
if (!__target_check_io_state(se_cmd, se_sess, 0))
continue;
- list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- if (!transport_cmd_finish_abort(se_cmd, true))
+ if (!transport_cmd_finish_abort(se_cmd))
target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
@@ -259,7 +239,7 @@ static void core_tmr_drain_tmr_list(
spin_unlock(&sess->sess_cmd_lock);
continue;
}
- if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
spin_unlock(&cmd->t_state_lock);
spin_unlock(&sess->sess_cmd_lock);
continue;
@@ -291,7 +271,7 @@ static void core_tmr_drain_tmr_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!transport_cmd_finish_abort(cmd, 1))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
@@ -380,7 +360,7 @@ static void core_tmr_drain_state_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!core_tmr_handle_tas_abort(cmd, tas))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ee5081ba5313..86c0156e6c88 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,7 +64,7 @@ struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);
@@ -224,7 +224,27 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
-struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session - initialize a session object
+ * @se_sess: Session object pointer.
+ *
+ * The caller must have zero-initialized @se_sess before calling this function.
+ */
+void transport_init_session(struct se_session *se_sess)
+{
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
+ init_waitqueue_head(&se_sess->cmd_list_wq);
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/**
+ * transport_alloc_session - allocate a session object and initialize it
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -234,17 +254,20 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&se_sess->sess_list);
- INIT_LIST_HEAD(&se_sess->sess_acl_list);
- INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
- spin_lock_init(&se_sess->sess_cmd_lock);
+ transport_init_session(se_sess);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session);
+EXPORT_SYMBOL(transport_alloc_session);
+/**
+ * transport_alloc_session_tags - allocate target driver private data
+ * @se_sess: Session pointer.
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ */
int transport_alloc_session_tags(struct se_session *se_sess,
unsigned int tag_num, unsigned int tag_size)
{
@@ -260,7 +283,8 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
}
- rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
+ rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
+ false, GFP_KERNEL, NUMA_NO_NODE);
if (rc < 0) {
pr_err("Unable to init se_sess->sess_tag_pool,"
" tag_num: %u\n", tag_num);
@@ -273,9 +297,16 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
EXPORT_SYMBOL(transport_alloc_session_tags);
-struct se_session *transport_init_session_tags(unsigned int tag_num,
- unsigned int tag_size,
- enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session_tags - allocate a session and target driver private data
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+static struct se_session *
+transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int rc;
@@ -291,7 +322,7 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return ERR_PTR(-EINVAL);
}
- se_sess = transport_init_session(sup_prot_ops);
+ se_sess = transport_alloc_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
@@ -303,7 +334,6 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session_tags);
/*
* Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
@@ -316,6 +346,7 @@ void __transport_register_session(
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
+ unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -352,7 +383,7 @@ void __transport_register_session(
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@@ -361,7 +392,7 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
@@ -385,7 +416,7 @@ void transport_register_session(
EXPORT_SYMBOL(transport_register_session);
struct se_session *
-target_alloc_session(struct se_portal_group *tpg,
+target_setup_session(struct se_portal_group *tpg,
unsigned int tag_num, unsigned int tag_size,
enum target_prot_op prot_op,
const char *initiatorname, void *private,
@@ -401,7 +432,7 @@ target_alloc_session(struct se_portal_group *tpg,
if (tag_num != 0)
sess = transport_init_session_tags(tag_num, tag_size, prot_op);
else
- sess = transport_init_session(prot_op);
+ sess = transport_alloc_session(prot_op);
if (IS_ERR(sess))
return sess;
@@ -427,7 +458,7 @@ target_alloc_session(struct se_portal_group *tpg,
transport_register_session(tpg, sess->se_node_acl, sess, private);
return sess;
}
-EXPORT_SYMBOL(target_alloc_session);
+EXPORT_SYMBOL(target_setup_session);
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
@@ -547,7 +578,7 @@ void transport_free_session(struct se_session *se_sess)
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
- percpu_ida_destroy(&se_sess->sess_tag_pool);
+ sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
kmem_cache_free(se_sess_cache, se_sess);
@@ -585,6 +616,13 @@ void transport_deregister_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_deregister_session);
+void target_remove_session(struct se_session *se_sess)
+{
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
+}
+EXPORT_SYMBOL(target_remove_session);
+
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -601,6 +639,13 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+/*
+ * This function is called by the target core after the target core has
+ * finished processing a SCSI command or SCSI TMF. Both the regular command
+ * processing code and the code for aborting commands can call this
+ * function. CMD_T_STOP is set if and only if another thread is waiting
+ * inside transport_wait_for_tasks() for t_transport_stop_comp.
+ */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
unsigned long flags;
@@ -650,23 +695,27 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd)
{
+ bool send_tas = cmd->transport_state & CMD_T_TAS;
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
int ret = 0;
+ if (send_tas)
+ transport_send_task_abort(cmd);
+
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
/*
* Allow the fabric driver to unmap any resources before
* releasing the descriptor via TFO->release_cmd()
*/
- if (remove)
+ if (!send_tas)
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
- if (remove && ack_kref)
+ if (!send_tas && ack_kref)
ret = target_put_sess_cmd(cmd);
return ret;
@@ -1267,7 +1316,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
- init_completion(&cmd->cmd_wait_comp);
+ cmd->compl = NULL;
spin_lock_init(&cmd->t_state_lock);
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
@@ -2079,9 +2128,6 @@ static void transport_complete_qf(struct se_cmd *cmd)
if (cmd->scsi_status)
goto queue_status;
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
goto queue_status;
}
@@ -2593,20 +2639,37 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+/*
+ * This function is called by frontend drivers after processing of a command
+ * has finished.
+ *
+ * The protocol for ensuring that either the regular flow or the TMF
+ * code drops one reference is as follows:
+ * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
+ * the frontend driver to drop one reference, synchronously or asynchronously.
+ * - During regular command processing the target core sets CMD_T_COMPLETE
+ * before invoking one of the .queue_*() functions.
+ * - The code that aborts commands skips commands and TMFs for which
+ * CMD_T_COMPLETE has been set.
+ * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
+ * commands that will be aborted.
+ * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
+ * transport_generic_free_cmd() skips its call to target_put_sess_cmd().
+ * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
+ * be called and will drop a reference.
+ * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
+ * will be called. transport_cmd_finish_abort() will drop the final reference.
+ */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
+ DECLARE_COMPLETION_ONSTACK(compl);
int ret = 0;
bool aborted = false, tas = false;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
- if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (wait_for_tasks)
+ target_wait_free_cmd(cmd, &aborted, &tas);
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
- } else {
- if (wait_for_tasks)
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
@@ -2617,20 +2680,14 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
-
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
}
- /*
- * If the task has been internally aborted due to TMR ABORT_TASK
- * or LUN_RESET, target_core_tmr.c is responsible for performing
- * the remaining calls to target_put_sess_cmd(), and not the
- * callers of this function.
- */
+ if (aborted)
+ cmd->compl = &compl;
+ if (!aborted || tas)
+ ret = target_put_sess_cmd(cmd);
if (aborted) {
pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
- wait_for_completion(&cmd->cmd_wait_comp);
- cmd->se_tfo->release_cmd(cmd);
+ wait_for_completion(&compl);
ret = 1;
}
return ret;
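For reference, here is a minimal sketch (not part of the patch) of the on-stack completion hand-off that transport_generic_free_cmd() now relies on: the waiter publishes a completion pointer while it still holds a reference, and whichever context drops the final kref signals that completion after the object has been released. struct foo, foo_release() and foo_free_and_wait() are illustrative names only.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	struct completion *compl;	/* set by a waiter, NULL otherwise */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);
	struct completion *compl = f->compl;

	kfree(f);
	if (compl)
		complete(compl);	/* wake the waiter once the object is gone */
}

static void foo_free_and_wait(struct foo *f)
{
	DECLARE_COMPLETION_ONSTACK(compl);

	f->compl = &compl;		/* publish while still holding a reference */
	kref_put(&f->kref, foo_release);
	wait_for_completion(&compl);	/* returns once foo_release() has run */
}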
@@ -2691,30 +2748,21 @@ static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
+ struct completion *compl = se_cmd->compl;
unsigned long flags;
- bool fabric_stop;
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
- spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
- (se_cmd->transport_state & CMD_T_ABORTED);
- spin_unlock(&se_cmd->t_state_lock);
-
- if (se_cmd->cmd_wait_set || fabric_stop) {
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- complete(&se_cmd->cmd_wait_comp);
- return;
- }
list_del_init(&se_cmd->se_cmd_list);
+ if (list_empty(&se_sess->sess_cmd_list))
+ wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
+ if (compl)
+ complete(compl);
}
/**
@@ -2833,78 +2881,41 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
EXPORT_SYMBOL(target_show_cmd);
/**
- * target_sess_cmd_list_set_waiting - Flag all commands in
- * sess_cmd_list to complete cmd_wait_comp. Set
- * sess_tearing_down so no more commands are queued.
+ * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
* @se_sess: session to flag
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
- int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return;
- }
se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- rc = kref_get_unless_zero(&se_cmd->cmd_kref);
- if (rc) {
- se_cmd->cmd_wait_set = 1;
- spin_lock(&se_cmd->t_state_lock);
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- spin_unlock(&se_cmd->t_state_lock);
- } else
- list_del_init(&se_cmd->se_cmd_list);
- }
-
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/**
- * target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * target_wait_for_sess_cmds - Wait for outstanding commands
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
- unsigned long flags;
- bool tas;
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
- " %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- tas = (se_cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
- if (!target_put_sess_cmd(se_cmd)) {
- if (tas)
- target_put_sess_cmd(se_cmd);
- }
-
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- se_cmd->se_tfo->release_cmd(se_cmd);
- }
+ struct se_cmd *cmd;
+ int ret;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- WARN_ON(!list_empty(&se_sess->sess_cmd_list));
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ WARN_ON_ONCE(!se_sess->sess_tearing_down);
+ spin_lock_irq(&se_sess->sess_cmd_lock);
+ do {
+ ret = wait_event_interruptible_lock_irq_timeout(
+ se_sess->cmd_list_wq,
+ list_empty(&se_sess->sess_cmd_list),
+ se_sess->sess_cmd_lock, 180 * HZ);
+ list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
+ target_show_cmd("session shutdown: still waiting for ",
+ cmd);
+ } while (ret <= 0);
+ spin_unlock_irq(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
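The rewritten target_wait_for_sess_cmds() leans on wait_event_interruptible_lock_irq_timeout(), which drops the given spinlock while sleeping and reacquires it before re-evaluating the condition. A generic sketch of that wait pattern follows; my_lock, my_wq and my_list are assumed names, and the 180 second re-report interval mirrors the code above.

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static LIST_HEAD(my_list);

static void wait_until_list_drains(void)
{
	long ret;

	spin_lock_irq(&my_lock);
	do {
		/* Sleeps with my_lock dropped; returns with it held again. */
		ret = wait_event_interruptible_lock_irq_timeout(my_wq,
				list_empty(&my_list), my_lock, 180 * HZ);
		if (ret <= 0)
			pr_debug("still waiting for the list to drain\n");
	} while (ret <= 0);
	spin_unlock_irq(&my_lock);
}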
@@ -3166,12 +3177,23 @@ static const struct sense_info sense_info_table[] = {
},
};
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
+/**
+ * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
+ * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
+ * be stored.
+ * @reason: LIO sense reason code. If this argument has the value
+ * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
+ * dequeuing a unit attention fails due to multiple commands being processed
+ * concurrently, set the command status to BUSY.
+ */
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
const struct sense_info *si;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
- u8 asc, ascq;
+ u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
@@ -3180,9 +3202,13 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
si = &sense_info_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+ key = si->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
- core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
- WARN_ON_ONCE(asc == 0);
+ if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
+ &ascq)) {
+ cmd->scsi_status = SAM_STAT_BUSY;
+ return;
+ }
} else if (si->asc == 0) {
WARN_ON_ONCE(cmd->scsi_asc == 0);
asc = cmd->scsi_asc;
@@ -3192,13 +3218,14 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
ascq = si->ascq;
}
- scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
if (si->add_sector_info)
- return scsi_set_sense_information(buffer,
- cmd->scsi_sense_length,
- cmd->bad_sector);
-
- return 0;
+ WARN_ON_ONCE(scsi_set_sense_information(buffer,
+ cmd->scsi_sense_length,
+ cmd->bad_sector) < 0);
}
int
@@ -3215,16 +3242,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!from_transport) {
- int rc;
-
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
- rc = translate_sense_reason(cmd, reason);
- if (rc)
- return rc;
- }
+ if (!from_transport)
+ translate_sense_reason(cmd, reason);
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
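translate_sense_reason() now sets the SCSI status and builds the sense buffer itself instead of leaving part of that work to its caller. As background, a minimal sketch of assembling sense data with scsi_build_sense_buffer(); the UNIT ATTENTION key and the 0x29/0x00 additional sense code below are illustrative values, not something this patch emits.

#include <linux/types.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

/* Fill @buf with fixed- or descriptor-format sense data. */
static void build_example_sense(u8 *buf, bool desc_format)
{
	/* UNIT ATTENTION, ASC/ASCQ 0x29/0x00: POWER ON, RESET, OR BUS DEVICE RESET OCCURRED. */
	scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION, 0x29, 0x00);
}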
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index be25eb807a5f..c8ac242ce888 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -55,7 +55,7 @@ target_scsi3_ua_check(struct se_cmd *cmd)
rcu_read_unlock();
return 0;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return 0;
}
@@ -154,7 +154,6 @@ int core_scsi3_ua_allocate(
&deve->ua_list);
spin_unlock(&deve->ua_lock);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -164,7 +163,6 @@ int core_scsi3_ua_allocate(
" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
asc, ascq);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
@@ -196,16 +194,17 @@ void core_scsi3_ua_release_all(
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
}
-void core_scsi3_ua_for_check_condition(
- struct se_cmd *cmd,
- u8 *asc,
- u8 *ascq)
+/*
+ * Dequeue a unit attention from the unit attention list. This function
+ * returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have
+ * been set.
+ */
+bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
+ u8 *ascq)
{
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
@@ -214,23 +213,23 @@ void core_scsi3_ua_for_check_condition(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!sess)
- return;
+ if (WARN_ON_ONCE(!sess))
+ return false;
nacl = sess->se_node_acl;
- if (!nacl)
- return;
+ if (WARN_ON_ONCE(!nacl))
+ return false;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
if (!deve) {
rcu_read_unlock();
- return;
- }
- if (!atomic_read(&deve->ua_count)) {
- rcu_read_unlock();
- return;
+ *key = ILLEGAL_REQUEST;
+ *asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */
+ *ascq = 0;
+ return true;
}
+ *key = UNIT_ATTENTION;
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
@@ -260,8 +259,6 @@ void core_scsi3_ua_for_check_condition(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
@@ -273,6 +270,8 @@ void core_scsi3_ua_for_check_condition(
(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
+
+ return head == 0;
}
int core_scsi3_ua_clear_for_request_sense(
@@ -299,7 +298,7 @@ int core_scsi3_ua_clear_for_request_sense(
rcu_read_unlock();
return -EINVAL;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return -EPERM;
}
@@ -322,8 +321,6 @@ int core_scsi3_ua_clear_for_request_sense(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
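With the ua_count atomic gone, list_empty_careful() provides the lock-free "any unit attentions pending?" fast path, while the authoritative list manipulation stays under ua_lock. A generic sketch of that pattern with illustrative names:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct ua_demo {
	spinlock_t lock;
	struct list_head list;
};

static bool ua_demo_pending(struct ua_demo *d)
{
	bool pending;

	/* Opportunistic check without the lock; cheap on the hot path. */
	if (list_empty_careful(&d->list))
		return false;

	/* Authoritative check and any dequeueing happen under the lock. */
	spin_lock(&d->lock);
	pending = !list_empty(&d->list);
	spin_unlock(&d->lock);
	return pending;
}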
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index b0f4205a96cd..76487c9be090 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -37,7 +37,8 @@ extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
-extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *,
+ u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
u8 *, u8 *);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index d8dc3d22051f..9cd404acdb82 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -83,14 +83,10 @@
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)
-#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
-/* The total size of the ring is 8M + 256K * PAGE_SIZE */
-#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
-
/*
* Default number of global data blocks(512K * PAGE_SIZE)
* when the unmap thread will be started.
@@ -98,6 +94,7 @@
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
+static u8 tcmu_netlink_blocked;
static struct device *tcmu_root_device;
@@ -107,9 +104,16 @@ struct tcmu_hba {
#define TCMU_CONFIG_LEN 256
+static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
+static LIST_HEAD(tcmu_nl_cmd_list);
+
+struct tcmu_dev;
+
struct tcmu_nl_cmd {
/* wake up thread waiting for reply */
struct completion complete;
+ struct list_head nl_list;
+ struct tcmu_dev *udev;
int cmd;
int status;
};
@@ -133,7 +137,7 @@ struct tcmu_dev {
struct inode *inode;
struct tcmu_mailbox *mb_addr;
- size_t dev_size;
+ uint64_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
@@ -161,10 +165,7 @@ struct tcmu_dev {
struct list_head timedout_entry;
- spinlock_t nl_cmd_lock;
struct tcmu_nl_cmd curr_nl_cmd;
- /* wake up threads waiting on curr_nl_cmd */
- wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
@@ -255,6 +256,92 @@ MODULE_PARM_DESC(global_max_data_area_mb,
"Max MBs allowed to be allocated to all the tcmu device's "
"data areas.");
+static int tcmu_get_block_netlink(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
+ "blocked" : "unblocked");
+}
+
+static int tcmu_set_block_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_netlink_blocked = val;
+ return 0;
+}
+
+static const struct kernel_param_ops tcmu_block_netlink_op = {
+ .set = tcmu_set_block_netlink,
+ .get = tcmu_get_block_netlink,
+};
+
+module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
+
+static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
+{
+ struct tcmu_dev *udev = nl_cmd->udev;
+
+ if (!tcmu_netlink_blocked) {
+ pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
+ return -EBUSY;
+ }
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
+ nl_cmd->status = -EINTR;
+ list_del(&nl_cmd->nl_list);
+ complete(&nl_cmd->complete);
+ }
+ return 0;
+}
+
+static int tcmu_set_reset_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1) {
+ pr_err("Invalid reset netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
+ ret = tcmu_fail_netlink_cmd(nl_cmd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+
+ return ret;
+}
+
+static const struct kernel_param_ops tcmu_reset_netlink_op = {
+ .set = tcmu_set_reset_netlink,
+};
+
+module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
+MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
+
/* multicast group */
enum tcmu_multicast_groups {
TCMU_MCGRP_CONFIG,
@@ -274,48 +361,50 @@ static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
- struct se_device *dev;
- struct tcmu_dev *udev;
+ struct tcmu_dev *udev = NULL;
struct tcmu_nl_cmd *nl_cmd;
int dev_id, rc, ret = 0;
- bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
!info->attrs[TCMU_ATTR_DEVICE_ID]) {
printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
- return -EINVAL;
+ return -EINVAL;
}
dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
- dev = target_find_device(dev_id, !is_removed);
- if (!dev) {
- printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
- completed_cmd, rc, dev_id);
- return -ENODEV;
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
+ if (nl_cmd->udev->se_dev.dev_index == dev_id) {
+ udev = nl_cmd->udev;
+ break;
+ }
}
- udev = TCMU_DEV(dev);
- spin_lock(&udev->nl_cmd_lock);
- nl_cmd = &udev->curr_nl_cmd;
+ if (!udev) {
+ pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ ret = -ENODEV;
+ goto unlock;
+ }
+ list_del(&nl_cmd->nl_list);
- pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
- nl_cmd->cmd, completed_cmd, rc);
+ pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
+ udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
+ nl_cmd->status);
if (nl_cmd->cmd != completed_cmd) {
- printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
- completed_cmd, nl_cmd->cmd);
+ pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
+ udev->name, completed_cmd, nl_cmd->cmd);
ret = -EINVAL;
- } else {
- nl_cmd->status = rc;
+ goto unlock;
}
- spin_unlock(&udev->nl_cmd_lock);
- if (!is_removed)
- target_undepend_item(&dev->dev_group.cg_item);
- if (!ret)
- complete(&nl_cmd->complete);
+ nl_cmd->status = rc;
+ complete(&nl_cmd->complete);
+unlock:
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
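tcmu now matches netlink replies to waiting requests through one mutex-protected global list instead of a per-device spinlock. A generic sketch of that request/reply matching pattern; demo_req, demo_submit() and demo_complete() are assumed names:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/completion.h>

struct demo_req {
	struct list_head list;
	u32 id;
	int status;
	struct completion done;
};

static DEFINE_MUTEX(demo_lock);
static LIST_HEAD(demo_pending);

static void demo_submit(struct demo_req *req, u32 id)
{
	req->id = id;
	init_completion(&req->done);
	mutex_lock(&demo_lock);
	list_add_tail(&req->list, &demo_pending);
	mutex_unlock(&demo_lock);
	/* ... send the request, then wait_for_completion(&req->done) ... */
}

static int demo_complete(u32 id, int status)
{
	struct demo_req *req;

	mutex_lock(&demo_lock);
	list_for_each_entry(req, &demo_pending, list) {
		if (req->id == id) {
			list_del(&req->list);
			req->status = status;
			complete(&req->done);
			mutex_unlock(&demo_lock);
			return 0;
		}
	}
	mutex_unlock(&demo_lock);
	return -ENODEV;
}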
@@ -982,7 +1071,6 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
&udev->cmd_timer);
if (ret) {
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- mutex_unlock(&udev->cmdr_lock);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
@@ -1282,6 +1370,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->max_blocks = DATA_BLOCK_BITS_DEF;
mutex_init(&udev->cmdr_lock);
+ INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->cmdr_queue);
idr_init(&udev->commands);
@@ -1289,9 +1378,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- init_waitqueue_head(&udev->nl_cmd_wq);
- spin_lock_init(&udev->nl_cmd_lock);
-
INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
return &udev->se_dev;
@@ -1565,38 +1651,48 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
return 0;
}
-static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
if (!tcmu_kern_cmd_reply_supported)
- return;
+ return 0;
if (udev->nl_reply_supported <= 0)
- return;
+ return 0;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
-relock:
- spin_lock(&udev->nl_cmd_lock);
+ if (tcmu_netlink_blocked) {
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
+ udev->name);
+ return -EAGAIN;
+ }
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
- spin_unlock(&udev->nl_cmd_lock);
- pr_debug("sleeping for open nl cmd\n");
- wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
- goto relock;
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("netlink cmd %d already executing on %s\n",
+ nl_cmd->cmd, udev->name);
+ return -EBUSY;
}
memset(nl_cmd, 0, sizeof(*nl_cmd));
nl_cmd->cmd = cmd;
+ nl_cmd->udev = udev;
init_completion(&nl_cmd->complete);
+ INIT_LIST_HEAD(&nl_cmd->nl_list);
+
+ list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
- spin_unlock(&udev->nl_cmd_lock);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ return 0;
}
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
int ret;
- DEFINE_WAIT(__wait);
if (!tcmu_kern_cmd_reply_supported)
return 0;
@@ -1607,13 +1703,10 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
- spin_lock(&udev->nl_cmd_lock);
+ mutex_lock(&tcmu_nl_cmd_mutex);
nl_cmd->cmd = TCMU_CMD_UNSPEC;
ret = nl_cmd->status;
- nl_cmd->status = 0;
- spin_unlock(&udev->nl_cmd_lock);
-
- wake_up_all(&udev->nl_cmd_wq);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
@@ -1657,19 +1750,21 @@ free_skb:
static int tcmu_netlink_event_send(struct tcmu_dev *udev,
enum tcmu_genl_cmd cmd,
- struct sk_buff **buf, void **hdr)
+ struct sk_buff *skb, void *msg_header)
{
- int ret = 0;
- struct sk_buff *skb = *buf;
- void *msg_header = *hdr;
+ int ret;
genlmsg_end(skb, msg_header);
- tcmu_init_genl_cmd_reply(udev, cmd);
+ ret = tcmu_init_genl_cmd_reply(udev, cmd);
+ if (ret) {
+ nlmsg_free(skb);
+ return ret;
+ }
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
+ /* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
if (!ret)
@@ -1687,9 +1782,8 @@ static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
&msg_header);
if (ret < 0)
return ret;
- return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
- &msg_header);
-
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
+ msg_header);
}
static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
@@ -1703,7 +1797,7 @@ static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
if (ret < 0)
return ret;
return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static int tcmu_update_uio_info(struct tcmu_dev *udev)
@@ -1745,9 +1839,11 @@ static int tcmu_configure_device(struct se_device *dev)
info = &udev->uio_info;
+ mutex_lock(&udev->cmdr_lock);
udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
sizeof(unsigned long),
GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
goto err_bitmap_alloc;
@@ -1842,11 +1938,6 @@ err_bitmap_alloc:
return ret;
}
-static bool tcmu_dev_configured(struct tcmu_dev *udev)
-{
- return udev->uio_info.uio_dev ? true : false;
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1953,45 +2044,76 @@ enum {
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
- {Opt_dev_size, "dev_size=%u"},
- {Opt_hw_block_size, "hw_block_size=%u"},
- {Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_dev_size, "dev_size=%s"},
+ {Opt_hw_block_size, "hw_block_size=%d"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
- unsigned long tmp_ul;
- char *arg_p;
- int ret;
+ int val, ret;
- arg_p = match_strdup(arg);
- if (!arg_p)
- return -ENOMEM;
-
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
+ ret = match_int(arg, &val);
if (ret < 0) {
- pr_err("kstrtoul() failed for dev attrib\n");
+ pr_err("match_int() failed for dev attrib. Error %d.\n",
+ ret);
return ret;
}
- if (!tmp_ul) {
- pr_err("dev attrib must be nonzero\n");
+
+ if (val <= 0) {
+ pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
+ val);
return -EINVAL;
}
- *dev_attrib = tmp_ul;
+ *dev_attrib = val;
return 0;
}
+static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid max_data_area %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
+ if (udev->max_blocks > tcmu_global_max_blocks) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ udev->max_blocks = tcmu_global_max_blocks;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- char *orig, *ptr, *opts, *arg_p;
+ char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token, tmpval;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -2014,15 +2136,10 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
break;
case Opt_dev_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
- kfree(arg_p);
+ ret = match_u64(&args[0], &udev->dev_size);
if (ret < 0)
- pr_err("kstrtoul() failed for dev_size=\n");
+ pr_err("match_u64() failed for dev_size=. Error %d.\n",
+ ret);
break;
case Opt_hw_block_size:
ret = tcmu_set_dev_attrib(&args[0],
@@ -2033,48 +2150,13 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
&(dev->dev_attrib.hw_max_sectors));
break;
case Opt_nl_reply_supported:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
- kfree(arg_p);
+ ret = match_int(&args[0], &udev->nl_reply_supported);
if (ret < 0)
- pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
+ ret);
break;
case Opt_max_data_area_mb:
- if (dev->export_count) {
- pr_err("Unable to set max_data_area_mb while exports exist\n");
- ret = -EINVAL;
- break;
- }
-
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &tmpval);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoint() failed for max_data_area_mb=\n");
- break;
- }
-
- if (tmpval <= 0) {
- pr_err("Invalid max_data_area %d\n", tmpval);
- ret = -EINVAL;
- break;
- }
-
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- tmpval,
- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
- }
+ ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
default:
break;
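The configfs option parsing now uses the <linux/parser.h> helpers (match_token(), match_int(), match_u64()) instead of open-coded kstrto*() conversions on duplicated strings. A small sketch of that parsing style, using illustrative option names rather than the tcmu ones:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/parser.h>

enum { Opt_queue_depth, Opt_size, Opt_err };

static match_table_t demo_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_size,        "size=%s"},
	{Opt_err,         NULL}
};

/* @options must be writable; strsep() modifies it in place. */
static int demo_parse(char *options, int *queue_depth, u64 *size)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int token, ret = 0;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		token = match_token(p, demo_tokens, args);
		switch (token) {
		case Opt_queue_depth:
			ret = match_int(&args[0], queue_depth);
			break;
		case Opt_size:
			ret = match_u64(&args[0], size);
			break;
		default:
			break;
		}
		if (ret < 0)
			break;
	}
	return ret;
}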
@@ -2095,7 +2177,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
- bl += sprintf(b + bl, "Size: %zu ", udev->dev_size);
+ bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
TCMU_BLOCKS_TO_MBS(udev->max_blocks));
@@ -2222,7 +2304,7 @@ static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
@@ -2239,7 +2321,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
return -EINVAL;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2264,7 +2346,7 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
+ return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
@@ -2284,7 +2366,7 @@ static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
@@ -2301,7 +2383,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2366,7 +2448,7 @@ static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
@@ -2383,7 +2465,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2419,6 +2501,11 @@ static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2446,6 +2533,11 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2510,6 +2602,11 @@ static void find_free_blocks(void)
list_for_each_entry(udev, &root_udev, node) {
mutex_lock(&udev->cmdr_lock);
+ if (!target_dev_configured(&udev->se_dev)) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
/* Try to complete the finished commands first */
tcmu_handle_completions(udev);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 9ee89e00cd77..2718a933c0c6 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -497,10 +497,7 @@ int target_xcopy_setup_pt(void)
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
- spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
+ transport_init_session(&xcopy_pt_sess);
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ec372860106f..a183d4da7db2 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,7 +28,6 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
-#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
@@ -92,7 +91,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
if (fr_seq(fp))
fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
ft_sess_put(sess); /* undo get from lookup at recv */
}
@@ -448,9 +447,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
struct se_session *se_sess = sess->se_sess;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
goto busy;
@@ -458,10 +457,11 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
memset(cmd, 0, sizeof(struct ft_cmd));
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
- percpu_ida_free(&se_sess->sess_tag_pool, tag);
+ target_free_tag(se_sess, &cmd->se_cmd);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 42ee91123dca..e55c4d537592 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -223,10 +223,7 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
/*
* local_port port_group (tpg) ops.
*/
-static struct se_portal_group *ft_add_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index c91979c1463d..6d4adf5ec26c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -239,7 +239,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess->tport = tport;
sess->port_id = port_id;
- sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+ sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
@@ -287,7 +287,6 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_close_sess(struct ft_sess *sess)
{
- transport_deregister_session_configfs(sess->se_sess);
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ft_sess_put(sess);
@@ -448,7 +447,7 @@ static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
kfree_rcu(sess, rcu);
}
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d78dbb73bde8..106988a6661a 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1071,15 +1071,16 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
{
struct se_session *se_sess = tv_nexus->tvn_se_sess;
struct usbg_cmd *cmd;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->se_cmd.tag = cmd->tag = scsi_tag;
cmd->fu = fu;
@@ -1288,7 +1289,7 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
struct se_session *se_sess = se_cmd->se_sess;
kfree(cmd->data_buf);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static u32 usbg_sess_get_index(struct se_session *se_sess)
@@ -1343,10 +1344,8 @@ static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
return 0;
}
-static struct se_portal_group *usbg_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
tport_wwn);
@@ -1379,7 +1378,7 @@ static struct se_portal_group *usbg_make_tpg(
goto unlock_dep;
} else {
ret = configfs_depend_item_unlocked(
- group->cg_subsys,
+ wwn->wwn_group.cg_subsys,
&opts->func_inst.group.cg_item);
if (ret)
goto unlock_dep;
@@ -1593,7 +1592,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
goto out_unlock;
}
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
USB_G_DEFAULT_SESSION_TAGS,
sizeof(struct usbg_cmd),
TARGET_PROT_NORMAL, name,
@@ -1639,7 +1638,7 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated vHost Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
kfree(tv_nexus);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 17fcd3b2e686..76f8d649147b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -46,7 +46,6 @@
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
-#include <linux/percpu_ida.h>
#include "vhost.h"
@@ -324,7 +323,7 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
}
vhost_scsi_put_inflight(tv_cmd->inflight);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
@@ -567,7 +566,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
struct se_session *se_sess;
struct scatterlist *sg, *prot_sg;
struct page **pages;
- int tag;
+ int tag, cpu;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
@@ -576,7 +575,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
}
se_sess = tv_nexus->tvn_se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0) {
pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM);
@@ -591,6 +590,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
cmd->tvc_se_cmd.map_tag = tag;
+ cmd->tvc_se_cmd.map_cpu = cpu;
cmd->tvc_tag = scsi_tag;
cmd->tvc_lun = lun;
cmd->tvc_task_attr = task_attr;
@@ -1738,7 +1738,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
VHOST_SCSI_DEFAULT_TAGS,
sizeof(struct vhost_scsi_cmd),
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
@@ -1797,7 +1797,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1912,9 +1912,7 @@ static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
};
static struct se_portal_group *
-vhost_scsi_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index e2f3e8b0fba9..14a3d4cbc2a7 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -654,9 +654,9 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_session *se_sess = nexus->tvn_se_sess;
struct vscsibk_pend *req;
- int tag, i;
+ int tag, cpu, i;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0) {
pr_err("Unable to obtain tag for vscsiif_request\n");
return ERR_PTR(-ENOMEM);
@@ -665,6 +665,7 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring
req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
for (i = 0; i < VSCSI_MAX_GRANTS; i++)
req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
@@ -1387,9 +1388,7 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd)
static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
- struct se_session *se_sess = se_cmd->se_sess;
-
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_cmd->se_sess, se_cmd);
}
static u32 scsiback_sess_get_index(struct se_session *se_sess)
@@ -1532,7 +1531,7 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
goto out_unlock;
}
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
VSCSI_DEFAULT_SESSION_TAGS,
sizeof(struct vscsibk_pend),
TARGET_PROT_NORMAL, name,
@@ -1587,7 +1586,7 @@ static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1743,9 +1742,7 @@ static void scsiback_port_unlink(struct se_portal_group *se_tpg,
}
static struct se_portal_group *
-scsiback_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+scsiback_make_tpg(struct se_wwn *wwn, const char *name)
{
struct scsiback_tport *tport = container_of(wwn,
struct scsiback_tport, tport_wwn);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 052e5ad9a4d2..0a7252aecfa5 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -410,6 +410,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/**
+ * sysfs_break_active_protection - break "active" protection
+ * @kobj: The kernel object @attr is associated with.
+ * @attr: The attribute to break the "active" protection for.
+ *
+ * With sysfs, just like kernfs, deletion of an attribute is postponed until
+ * all active .show() and .store() callbacks have finished unless this function
+ * is called. Hence this function is useful in methods that implement self
+ * deletion.
+ */
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ struct kernfs_node *kn;
+
+ kobject_get(kobj);
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (kn)
+ kernfs_break_active_protection(kn);
+ return kn;
+}
+EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+
+/**
+ * sysfs_unbreak_active_protection - restore "active" protection
+ * @kn: Pointer returned by sysfs_break_active_protection().
+ *
+ * Undo the effects of sysfs_break_active_protection(). Since this function
+ * calls kernfs_put() on the kernfs node corresponding to the 'attr' argument
+ * passed to sysfs_break_active_protection(), and since that attribute may have
+ * been removed between the sysfs_break_active_protection() and
+ * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after
+ * this function has returned.
+ */
+void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+ struct kobject *kobj = kn->parent->priv;
+
+ kernfs_unbreak_active_protection(kn);
+ kernfs_put(kn);
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
+
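A sketch of the self-deletion pattern these helpers enable: a .store() callback that removes the object backing its own attribute without deadlocking against kernfs' active-reference draining. demo_delete_store() and demo_remove_device() are assumed names, not part of this patch.

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/bug.h>

void demo_remove_device(struct device *dev);	/* assumed helper, defined elsewhere */

static ssize_t demo_delete_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/* Drop "active" protection so this attribute may vanish under us. */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	WARN_ON_ONCE(!kn);

	demo_remove_device(dev);

	if (kn)
		sysfs_unbreak_active_protection(kn);
	return count;
}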
+/**
* sysfs_remove_file_ns - remove an object attribute with a custom ns tag
* @kobj: object we're acting for
* @attr: attribute descriptor
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 32f247cb5e9e..bc4f87cbe7f4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1111,6 +1111,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_host_get(struct ata_host *host);
+extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
struct scsi_host_template *sht);
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
deleted file mode 100644
index 07d78e4653bc..000000000000
--- a/include/linux/percpu_ida.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PERCPU_IDA_H__
-#define __PERCPU_IDA_H__
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/spinlock_types.h>
-#include <linux/wait.h>
-#include <linux/cpumask.h>
-
-struct percpu_ida_cpu;
-
-struct percpu_ida {
- /*
- * number of tags available to be allocated, as passed to
- * percpu_ida_init()
- */
- unsigned nr_tags;
- unsigned percpu_max_size;
- unsigned percpu_batch_size;
-
- struct percpu_ida_cpu __percpu *tag_cpu;
-
- /*
- * Bitmap of cpus that (may) have tags on their percpu freelists:
- * steal_tags() uses this to decide when to steal tags, and which cpus
- * to try stealing from.
- *
- * It's ok for a freelist to be empty when its bit is set - steal_tags()
- * will just keep looking - but the bitmap _must_ be set whenever a
- * percpu freelist does have tags.
- */
- cpumask_t cpus_have_tags;
-
- struct {
- spinlock_t lock;
- /*
- * When we go to steal tags from another cpu (see steal_tags()),
- * we want to pick a cpu at random. Cycling through them every
- * time we steal is a bit easier and more or less equivalent:
- */
- unsigned cpu_last_stolen;
-
- /* For sleeping on allocation failure */
- wait_queue_head_t wait;
-
- /*
- * Global freelist - it's a stack where nr_free points to the
- * top
- */
- unsigned nr_free;
- unsigned *freelist;
- } ____cacheline_aligned_in_smp;
-};
-
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
-/* Max size of percpu freelist, */
-#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
-
-int percpu_ida_alloc(struct percpu_ida *pool, int state);
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
-
-void percpu_ida_destroy(struct percpu_ida *pool);
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size);
-static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
-{
- return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
- IDA_DEFAULT_PCPU_BATCH_MOVE);
-}
-
-typedef int (*percpu_ida_cb)(unsigned, void *);
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data);
-
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
-#endif /* __PERCPU_IDA_H__ */
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index e6539536dea9..804a50983ec5 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -23,6 +23,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+struct seq_file;
+
/**
* struct sbitmap_word - Word in a &struct sbitmap.
*/
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index b8bfdc173ec0..3c12198c0103 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -237,6 +237,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr);
+void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -350,6 +353,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
+static inline struct kernfs_node *
+sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return NULL;
+}
+
+static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+}
+
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 225ab7783dfd..3de3b10da19a 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,7 +161,7 @@ struct sata_device {
u8 port_no; /* port number, if this is a PM (Port) */
struct ata_port *ap;
- struct ata_host ata_host;
+ struct ata_host *ata_host;
struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
u8 fis[ATA_RESP_FIS_SIZE];
};
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 53b485fe9b67..5ea06d310a25 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -758,6 +758,7 @@ extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index cf5f3fff1f1a..f2e6abea8490 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -4,6 +4,7 @@
#include <linux/dma-direction.h> /* enum dma_data_direction */
#include <linux/list.h> /* struct list_head */
+#include <linux/sched.h>
#include <linux/socket.h> /* struct sockaddr_storage */
#include <linux/types.h> /* u8 */
#include <scsi/iscsi_proto.h> /* itt_t */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 34a15d59ed88..51b6f50eabee 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -106,13 +106,15 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
-struct se_device *target_find_device(int id, bool do_depend);
-
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q);
+static inline bool target_dev_configured(struct se_device *se_dev)
+{
+ return !!(se_dev->dev_flags & DF_CONFIGURED);
+}
/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
static inline uint32_t get_unaligned_be24(const uint8_t *const p)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 922a39f45abc..7a4ee7852ca4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -4,7 +4,7 @@
#include <linux/configfs.h> /* struct config_group */
#include <linux/dma-direction.h> /* enum dma_data_direction */
-#include <linux/percpu_ida.h> /* struct percpu_ida */
+#include <linux/sbitmap.h>
#include <linux/percpu-refcount.h>
#include <linux/semaphore.h> /* struct semaphore */
#include <linux/completion.h>
@@ -443,7 +443,6 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
- unsigned cmd_wait_set:1;
unsigned unknown_data_length:1;
bool state_active:1;
u64 tag; /* SAM command identifier aka task tag */
@@ -455,6 +454,7 @@ struct se_cmd {
int sam_task_attr;
/* Used for se_sess->sess_tag_pool */
unsigned int map_tag;
+ int map_cpu;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
/* See se_cmd_flags_table */
@@ -475,7 +475,7 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
- struct completion cmd_wait_comp;
+ struct completion *compl;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -605,10 +605,10 @@ struct se_session {
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
- struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
+ wait_queue_head_t cmd_list_wq;
void *sess_cmd_map;
- struct percpu_ida sess_tag_pool;
+ struct sbitmap_queue sess_tag_pool;
};
struct se_device;
@@ -638,7 +638,6 @@ struct se_dev_entry {
atomic_long_t total_cmds;
atomic_long_t read_bytes;
atomic_long_t write_bytes;
- atomic_t ua_count;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
@@ -934,4 +933,9 @@ static inline void atomic_dec_mb(atomic_t *v)
smp_mb__after_atomic();
}
+static inline void target_free_tag(struct se_session *sess, struct se_cmd *cmd)
+{
+ sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu);
+}
+
#endif /* TARGET_CORE_BASE_H */
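target_free_tag() above pairs with the sbitmap_queue_get() calls that replace percpu_ida_alloc() in the fabric drivers later in this diff: the allocation returns both a tag and the CPU it was taken from, and both are needed to hand the tag back. An illustrative sketch; struct demo_cmd and the helpers are assumed names.

#include <linux/sbitmap.h>
#include <linux/string.h>
#include <target/target_core_base.h>

struct demo_cmd {
	struct se_cmd se_cmd;
	/* fabric-specific per-command state would follow */
};

static struct demo_cmd *demo_get_cmd(struct se_session *se_sess)
{
	struct demo_cmd *cmd;
	unsigned int cpu;
	int tag;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	cmd = &((struct demo_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;
	cmd->se_cmd.map_cpu = cpu;
	return cmd;
}

static void demo_put_cmd(struct se_session *se_sess, struct demo_cmd *cmd)
{
	/* Hands map_tag back to the pool on map_cpu. */
	target_free_tag(se_sess, &cmd->se_cmd);
}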
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index b297aa0d9651..f4147b398431 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -79,7 +79,7 @@ struct target_core_fabric_ops {
void (*fabric_drop_wwn)(struct se_wwn *);
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
- struct config_group *, const char *);
+ const char *);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
@@ -109,17 +109,17 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);
-struct se_session *target_alloc_session(struct se_portal_group *,
+struct se_session *target_setup_session(struct se_portal_group *,
unsigned int, unsigned int, enum target_prot_op prot_op,
const char *, void *,
int (*callback)(struct se_portal_group *,
struct se_session *, void *));
+void target_remove_session(struct se_session *);
-struct se_session *transport_init_session(enum target_prot_op);
+void transport_init_session(struct se_session *);
+struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
-struct se_session *transport_init_session_tags(unsigned int, unsigned int,
- enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
diff --git a/lib/Makefile b/lib/Makefile
index ff3a397bbb12..d95bb2525101 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -37,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+ percpu-refcount.o rhashtable.o reciprocal_div.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index beb14839b41a..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
- /*
- * Even though this is percpu, we need a lock for tag stealing by remote
- * CPUs:
- */
- spinlock_t lock;
-
- /* nr_free/freelist form a stack of free IDs */
- unsigned nr_free;
- unsigned freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
- unsigned *src, unsigned *src_nr,
- unsigned nr)
-{
- *src_nr -= nr;
- memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
- *dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
- struct percpu_ida_cpu *remote;
-
- for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags; cpus_have_tags--) {
- cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_first(&pool->cpus_have_tags);
- if (cpu >= nr_cpu_ids)
- BUG();
- }
-
- pool->cpu_last_stolen = cpu;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
- cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
- if (remote == tags)
- continue;
-
- spin_lock(&remote->lock);
-
- if (remote->nr_free) {
- memcpy(tags->freelist,
- remote->freelist,
- sizeof(unsigned) * remote->nr_free);
-
- tags->nr_free = remote->nr_free;
- remote->nr_free = 0;
- }
-
- spin_unlock(&remote->lock);
-
- if (tags->nr_free)
- break;
- }
-}
-
-/*
- * Pop up to pool->percpu_batch_size IDs off the global freelist, and push
- * them onto our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- move_tags(tags->freelist, &tags->nr_free,
- pool->freelist, &pool->nr_free,
- min(pool->nr_free, pool->percpu_batch_size));
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (as passed to
- * percpu_ida_init()), or -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context, provided @state is not
- * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE (i.e. the caller must not sleep).
- *
- * @state indicates whether or not to wait until a free id is available; if
- * passed TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE we may sleep however long
- * it takes until another thread frees an id (same semantics as a mempool),
- * although TASK_INTERRUPTIBLE may still return -ERESTARTSYS on a signal.
- *
- * Will not return -ENOSPC if passed TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
- DEFINE_WAIT(wait);
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- int tag = -ENOSPC;
-
- tags = raw_cpu_ptr(pool->tag_cpu);
- spin_lock_irqsave(&tags->lock, flags);
-
- /* Fastpath */
- if (likely(tags->nr_free)) {
- tag = tags->freelist[--tags->nr_free];
- spin_unlock_irqrestore(&tags->lock, flags);
- return tag;
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- while (1) {
- spin_lock_irqsave(&pool->lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /*
- * prepare_to_wait() must come before steal_tags(), in case
- * percpu_ida_free() on another cpu flips a bit in
- * cpus_have_tags
- *
- * global lock held and irqs disabled, don't need percpu lock
- */
- if (state != TASK_RUNNING)
- prepare_to_wait(&pool->wait, &wait, state);
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
- if (tags->nr_free) {
- tag = tags->freelist[--tags->nr_free];
- if (tags->nr_free)
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- }
-
- spin_unlock_irqrestore(&pool->lock, flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
-
- if (signal_pending_state(state, current)) {
- tag = -ERESTARTSYS;
- break;
- }
-
- schedule();
- }
- if (state != TASK_RUNNING)
- finish_wait(&pool->wait, &wait);
-
- return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- unsigned nr_free;
-
- BUG_ON(tag >= pool->nr_tags);
-
- tags = raw_cpu_ptr(pool->tag_cpu);
-
- spin_lock_irqsave(&tags->lock, flags);
- tags->freelist[tags->nr_free++] = tag;
-
- nr_free = tags->nr_free;
-
- if (nr_free == 1) {
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- wake_up(&pool->wait);
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- if (nr_free == pool->percpu_max_size) {
- spin_lock_irqsave(&pool->lock, flags);
- spin_lock(&tags->lock);
-
- if (tags->nr_free == pool->percpu_max_size) {
- move_tags(pool->freelist, &pool->nr_free,
- tags->freelist, &tags->nr_free,
- pool->percpu_batch_size);
-
- wake_up(&pool->wait);
- }
- spin_unlock(&tags->lock);
- spin_unlock_irqrestore(&pool->lock, flags);
- }
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
- free_percpu(pool->tag_cpu);
- free_pages((unsigned long) pool->freelist,
- get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size)
-{
- unsigned i, cpu, order;
-
- memset(pool, 0, sizeof(*pool));
-
- init_waitqueue_head(&pool->wait);
- spin_lock_init(&pool->lock);
- pool->nr_tags = nr_tags;
- pool->percpu_max_size = max_size;
- pool->percpu_batch_size = batch_size;
-
- /* Guard against overflow */
- if (nr_tags > (unsigned) INT_MAX + 1) {
- pr_err("percpu_ida_init(): nr_tags too large\n");
- return -EINVAL;
- }
-
- order = get_order(nr_tags * sizeof(unsigned));
- pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
- if (!pool->freelist)
- return -ENOMEM;
-
- for (i = 0; i < nr_tags; i++)
- pool->freelist[i] = i;
-
- pool->nr_free = nr_tags;
-
- pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
- pool->percpu_max_size * sizeof(unsigned),
- sizeof(unsigned));
- if (!pool->tag_cpu)
- goto err;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
- return 0;
-err:
- percpu_ida_destroy(pool);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: callback invoked for each free id
- * @data: parameter for @fn
- *
- * Note: this does not guarantee that every free id is visited exactly once.
- * Some free ids might be missed, some might be visited more than once, and
- * some might no longer be free by the time the callback runs.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data)
-{
- unsigned long flags;
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock_irqsave(&remote->lock, flags);
- for (i = 0; i < remote->nr_free; i++) {
- err = fn(remote->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&remote->lock, flags);
- if (err)
- goto out;
- }
-
- spin_lock_irqsave(&pool->lock, flags);
- for (i = 0; i < pool->nr_free; i++) {
- err = fn(pool->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return the number of free tags for a cpu or the global pool
- * @pool: pool to query
- * @cpu: cpu to query, or nr_cpu_ids for the global pool
- *
- * Note: this only returns a snapshot of the number of free tags.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
- struct percpu_ida_cpu *remote;
- if (cpu == nr_cpu_ids)
- return pool->nr_free;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
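
For reference, the interface deleted above was used roughly like this; a hedged sketch based only on the signatures removed in this hunk (demo_pool, the tag count and the per-cpu size/batch values are illustrative assumptions, not taken from any real caller):

#include <linux/percpu_ida.h>	/* removed together with lib/percpu_ida.c */
#include <linux/sched.h>

static struct percpu_ida demo_pool;

static int demo_setup(void)
{
	/* 1024 tags; per-cpu caches hold up to 128 tags, moved in batches of 32 */
	return __percpu_ida_init(&demo_pool, 1024, 128, 32);
}

static void demo_io(void)
{
	int tag;

	/* with TASK_UNINTERRUPTIBLE this sleeps until a tag becomes free */
	tag = percpu_ida_alloc(&demo_pool, TASK_UNINTERRUPTIBLE);
	if (tag < 0)
		return;

	/* ... tag indexes a preallocated array of per-command structures ... */

	percpu_ida_free(&demo_pool, tag);
}

static void demo_teardown(void)
{
	percpu_ida_destroy(&demo_pool);
}

The remaining in-tree users of this allocator (in the SCSI target code) appear to have been converted to the sbitmap API elsewhere in this series, which is why the library itself can be dropped here.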