author    Linus Torvalds <torvalds@linux-foundation.org>    2024-05-14 18:25:53 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-05-14 18:25:53 -0700
commit    113d1dd9c8ea2186d56a641a787e2588673c9c32 (patch)
tree      1e844eb68b7c8659aa567b0dcddba0e41280f552
parent    b2665fe61d8a51ef70b27e1a830635a72dcc6ad8 (diff)
parent    3668651def2c1622904e58b0280ee93121f2b10b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "Updates to the usual drivers (ufs, lpfc, qla2xxx, mpi3mr, libsas).

  The major update (which causes a conflict with block, see below) is
  Christoph removing the queue limits and their associated block
  helpers. The remaining patches are assorted minor fixes and
  deprecated function updates plus a bit of constification"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (141 commits)
  scsi: mpi3mr: Sanitise num_phys
  scsi: lpfc: Copyright updates for 14.4.0.2 patches
  scsi: lpfc: Update lpfc version to 14.4.0.2
  scsi: lpfc: Add support for 32 byte CDBs
  scsi: lpfc: Change lpfc_hba hba_flag member into a bitmask
  scsi: lpfc: Introduce rrq_list_lock to protect active_rrq_list
  scsi: lpfc: Clear deferred RSCN processing flag when driver is unloading
  scsi: lpfc: Update logging of protection type for T10 DIF I/O
  scsi: lpfc: Change default logging level for unsolicited CT MIB commands
  scsi: target: Remove unused list 'device_list'
  scsi: iscsi: Remove unused list 'connlist_err'
  scsi: ufs: exynos: Add support for Tensor gs101 SoC
  scsi: ufs: exynos: Add some pa_dbg_ register offsets into drvdata
  scsi: ufs: exynos: Allow max frequencies up to 267Mhz
  scsi: ufs: exynos: Add EXYNOS_UFS_OPT_TIMER_TICK_SELECT option
  scsi: ufs: exynos: Add EXYNOS_UFS_OPT_UFSPR_SECURE option
  scsi: ufs: dt-bindings: exynos: Add gs101 compatible
  scsi: qla2xxx: Fix debugfs output for fw_resource_count
  scsi: qedf: Ensure the copied buf is NUL terminated
  scsi: bfa: Ensure the copied buf is NUL terminated
  ...
-rw-r--r-- Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml | 38
-rw-r--r-- Documentation/driver-api/scsi.rst | 15
-rw-r--r-- Documentation/scsi/scsi_mid_low_api.rst | 22
-rw-r--r-- MAINTAINERS | 3
-rw-r--r-- block/blk-settings.c | 245
-rw-r--r-- block/bsg-lib.c | 6
-rw-r--r-- drivers/ata/ahci.h | 2
-rw-r--r-- drivers/ata/libata-sata.c | 171
-rw-r--r-- drivers/ata/libata-scsi.c | 19
-rw-r--r-- drivers/ata/libata.h | 3
-rw-r--r-- drivers/ata/pata_macio.c | 11
-rw-r--r-- drivers/ata/sata_mv.c | 2
-rw-r--r-- drivers/ata/sata_nv.c | 24
-rw-r--r-- drivers/ata/sata_sil24.c | 2
-rw-r--r-- drivers/firewire/sbp2.c | 13
-rw-r--r-- drivers/message/fusion/mptfc.c | 1
-rw-r--r-- drivers/message/fusion/mptsas.c | 1
-rw-r--r-- drivers/message/fusion/mptscsih.c | 2
-rw-r--r-- drivers/message/fusion/mptspi.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 2
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 6
-rw-r--r-- drivers/scsi/FlashPoint.c | 1
-rw-r--r-- drivers/scsi/Kconfig | 4
-rw-r--r-- drivers/scsi/a3000.c | 8
-rw-r--r-- drivers/scsi/a4000t.c | 8
-rw-r--r-- drivers/scsi/aha152x.c | 8
-rw-r--r-- drivers/scsi/aic7xxx/Kconfig.aic79xx | 75
-rw-r--r-- drivers/scsi/aic7xxx/Kconfig.aic7xxx | 97
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 29
-rw-r--r-- drivers/scsi/atari_scsi.c | 8
-rw-r--r-- drivers/scsi/bfa/bfad_debugfs.c | 4
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 4
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 3
-rw-r--r-- drivers/scsi/cxlflash/lunmgt.c | 6
-rw-r--r-- drivers/scsi/cxlflash/main.c | 18
-rw-r--r-- drivers/scsi/cxlflash/superpipe.c | 40
-rw-r--r-- drivers/scsi/cxlflash/superpipe.h | 11
-rw-r--r-- drivers/scsi/cxlflash/vlun.c | 9
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 3
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 7
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 20
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 26
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 31
-rw-r--r-- drivers/scsi/hosts.c | 6
-rw-r--r-- drivers/scsi/hpsa.c | 2
-rw-r--r-- drivers/scsi/hptiop.c | 8
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 5
-rw-r--r-- drivers/scsi/imm.c | 12
-rw-r--r-- drivers/scsi/ipr.c | 10
-rw-r--r-- drivers/scsi/isci/init.c | 29
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 84
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 38
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 15
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 62
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 31
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 24
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 43
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 133
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 119
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 63
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 27
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 9
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 71
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.h | 32
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 218
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/mac_scsi.c | 8
-rw-r--r-- drivers/scsi/megaraid/Kconfig.megaraid | 113
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 29
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 3
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 3
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_image.h | 20
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 20
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 2
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr.h | 15
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_app.c | 33
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 42
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 86
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_transport.c | 10
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 18
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 18
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 26
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 5
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 21
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 1
-rw-r--r-- drivers/scsi/pmcraid.c | 11
-rw-r--r-- drivers/scsi/ppa.c | 8
-rw-r--r-- drivers/scsi/qedf/qedf_debugfs.c | 2
-rw-r--r-- drivers/scsi/qedf/qedf_io.c | 6
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r-- drivers/scsi/qedi/qedi_debugfs.c | 12
-rw-r--r-- drivers/scsi/qla2xxx/Kconfig | 42
-rw-r--r-- drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 9
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 17
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 14
-rw-r--r-- drivers/scsi/scsi_debugfs.c | 56
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 18
-rw-r--r-- drivers/scsi/scsi_lib.c | 40
-rw-r--r-- drivers/scsi/scsi_scan.c | 74
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 5
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 15
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 7
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r-- drivers/scsi/sd.c | 1
-rw-r--r-- drivers/scsi/ses.c | 1
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 5
-rw-r--r-- drivers/scsi/snic/snic_attrs.c | 11
-rw-r--r-- drivers/scsi/sr.c | 1
-rw-r--r-- drivers/scsi/st.c | 1
-rw-r--r-- drivers/scsi/wd33c93.c | 4
-rw-r--r-- drivers/staging/rts5208/rtsx.c | 24
-rw-r--r-- drivers/target/target_core_device.c | 1
-rw-r--r-- drivers/ufs/core/ufs-mcq.c | 3
-rw-r--r-- drivers/ufs/core/ufs_bsg.c | 3
-rw-r--r-- drivers/ufs/core/ufshcd.c | 370
-rw-r--r-- drivers/ufs/host/cdns-pltfrm.c | 2
-rw-r--r-- drivers/ufs/host/ufs-exynos.c | 205
-rw-r--r-- drivers/ufs/host/ufs-exynos.h | 24
-rw-r--r-- drivers/ufs/host/ufs-mediatek-sip.h | 94
-rw-r--r-- drivers/ufs/host/ufs-mediatek.c | 131
-rw-r--r-- drivers/ufs/host/ufs-mediatek.h | 90
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 25
-rw-r--r-- drivers/ufs/host/ufs-qcom.h | 12
-rw-r--r-- drivers/usb/image/microtek.c | 8
-rw-r--r-- drivers/usb/storage/scsiglue.c | 57
-rw-r--r-- drivers/usb/storage/uas.c | 29
-rw-r--r-- drivers/usb/storage/usb.c | 10
-rw-r--r-- include/linux/blkdev.h | 27
-rw-r--r-- include/linux/bsg-lib.h | 3
-rw-r--r-- include/linux/libata.h | 16
-rw-r--r-- include/linux/mmc/host.h | 4
-rw-r--r-- include/scsi/iser.h | 2
-rw-r--r-- include/scsi/libfc.h | 18
-rw-r--r-- include/scsi/libfcoe.h | 25
-rw-r--r-- include/scsi/libsas.h | 32
-rw-r--r-- include/scsi/sas_ata.h | 6
-rw-r--r-- include/scsi/scsi.h | 12
-rw-r--r-- include/scsi/scsi_cmnd.h | 2
-rw-r--r-- include/scsi/scsi_driver.h | 4
-rw-r--r-- include/scsi/scsi_host.h | 9
-rw-r--r-- include/scsi/scsi_transport.h | 2
-rw-r--r-- include/scsi/scsi_transport_fc.h | 6
-rw-r--r-- include/scsi/scsi_transport_srp.h | 4
-rw-r--r-- include/uapi/scsi/scsi_bsg_mpi3mr.h | 8
-rw-r--r-- include/uapi/scsi/scsi_bsg_ufs.h | 4
-rw-r--r-- include/ufs/ufshcd.h | 3
-rw-r--r-- include/ufs/ufshci.h | 13
154 files changed, 2168 insertions, 2037 deletions
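
The queue limits removal called out in the pull message follows one pattern throughout the diffs below: instead of mutating the live request queue with blk_queue_*() setters from a ->slave_configure() hook, a driver fills in the struct queue_limits passed to the new ->device_configure() hook and the SCSI midlayer validates and applies the whole set. A minimal before/after sketch with a hypothetical driver (names and values are illustrative, not from this series):

    /* Before: poke the live queue from ->slave_configure() */
    static int foo_slave_configure(struct scsi_device *sdev)
    {
            blk_queue_max_hw_sectors(sdev->request_queue, 256);
            blk_queue_update_dma_alignment(sdev->request_queue, 511);
            return 0;
    }

    /* After: describe the limits; the midlayer commits them for us */
    static int foo_device_configure(struct scsi_device *sdev,
                                    struct queue_limits *lim)
    {
            lim->max_hw_sectors = 256;
            lim->dma_alignment = 511;
            return 0;
    }

    static const struct scsi_host_template foo_sht = {
            /* ... */
            .device_configure = foo_device_configure,
    };

The libata, pata_macio and sbp2 hunks below are real instances of this conversion.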
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
index b2b509b3944d..720879820f66 100644
--- a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -12,12 +12,10 @@ maintainers:
description: |
Each Samsung UFS host controller instance should have its own node.
-allOf:
- - $ref: ufs-common.yaml
-
properties:
compatible:
enum:
+ - google,gs101-ufs
- samsung,exynos7-ufs
- samsung,exynosautov9-ufs
- samsung,exynosautov9-ufs-vh
@@ -38,14 +36,24 @@ properties:
- const: ufsp
clocks:
+ minItems: 2
items:
- description: ufs link core clock
- description: unipro main clock
+ - description: fmp clock
+ - description: ufs aclk clock
+ - description: ufs pclk clock
+ - description: sysreg clock
clock-names:
+ minItems: 2
items:
- const: core_clk
- const: sclk_unipro_main
+ - const: fmp
+ - const: aclk
+ - const: pclk
+ - const: sysreg
phys:
maxItems: 1
@@ -72,6 +80,30 @@ required:
- clocks
- clock-names
+allOf:
+ - $ref: ufs-common.yaml
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: google,gs101-ufs
+
+ then:
+ properties:
+ clocks:
+ minItems: 6
+
+ clock-names:
+ minItems: 6
+
+ else:
+ properties:
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ maxItems: 2
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index ec92ea2c82cd..273281474c09 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -20,7 +20,7 @@ Although the old parallel (fast/wide/ultra) SCSI bus has largely fallen
out of use, the SCSI command set is more widely used than ever to
communicate with devices over a number of different busses.
-The `SCSI protocol <http://www.t10.org/scsi-3.htm>`__ is a big-endian
+The `SCSI protocol <https://www.t10.org/scsi-3.htm>`__ is a big-endian
peer-to-peer packet based protocol. SCSI commands are 6, 10, 12, or 16
bytes long, often followed by an associated data payload.
@@ -28,8 +28,7 @@ SCSI commands can be transported over just about any kind of bus, and
are the default protocol for storage devices attached to USB, SATA, SAS,
Fibre Channel, FireWire, and ATAPI devices. SCSI packets are also
commonly exchanged over Infiniband,
-`I2O <http://i2o.shadowconnect.com/faq.php>`__, TCP/IP
-(`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel
+TCP/IP (`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel
ports <http://cyberelk.net/tim/parport/parscsi.html>`__.
Design of the Linux SCSI subsystem
@@ -170,9 +169,9 @@ drivers/scsi/scsi_netlink.c
Infrastructure to provide async events from transports to userspace via
netlink, using a single NETLINK_SCSITRANSPORT protocol for all
-transports. See `the original patch
-submission <http://marc.info/?l=linux-scsi&m=115507374832500&w=2>`__ for
-more details.
+transports. See `the original patch submission
+<https://lore.kernel.org/linux-scsi/1155070439.6275.5.camel@localhost.localdomain/>`__
+for more details.
.. kernel-doc:: drivers/scsi/scsi_netlink.c
:internal:
@@ -328,11 +327,11 @@ the ordinary is seen.
To be more realistic, the simulated devices have the transport
attributes of SAS disks.
-For documentation see http://sg.danny.cz/sg/sdebug26.html
+For documentation see http://sg.danny.cz/sg/scsi_debug.html
todo
~~~~
Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
-FireWire, ATAPI devices, Infiniband, I2O, Parallel ports,
+FireWire, ATAPI devices, Infiniband, Parallel ports,
netlink...
diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
index 022198c51350..2df29b92e196 100644
--- a/Documentation/scsi/scsi_mid_low_api.rst
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -42,18 +42,18 @@ This version of the document roughly matches linux kernel version 2.6.8 .
Documentation
=============
There is a SCSI documentation directory within the kernel source tree,
-typically Documentation/scsi . Most documents are in plain
-(i.e. ASCII) text. This file is named scsi_mid_low_api.txt and can be
+typically Documentation/scsi . Most documents are in reStructuredText
+format. This file is named scsi_mid_low_api.rst and can be
found in that directory. A more recent copy of this document may be found
-at http://web.archive.org/web/20070107183357rn_1/sg.torque.net/scsi/.
-Many LLDs are documented there (e.g. aic7xxx.txt). The SCSI mid-level is
-briefly described in scsi.txt which contains a url to a document
-describing the SCSI subsystem in the lk 2.4 series. Two upper level
-drivers have documents in that directory: st.txt (SCSI tape driver) and
-scsi-generic.txt (for the sg driver).
-
-Some documentation (or urls) for LLDs may be found in the C source code
-or in the same directory as the C source code. For example to find a url
+at https://docs.kernel.org/scsi/scsi_mid_low_api.html. Many LLDs are
+documented in Documentation/scsi (e.g. aic7xxx.rst). The SCSI mid-level is
+briefly described in scsi.rst which contains a URL to a document describing
+the SCSI subsystem in the Linux Kernel 2.4 series. Two upper level
+drivers have documents in that directory: st.rst (SCSI tape driver) and
+scsi-generic.rst (for the sg driver).
+
+Some documentation (or URLs) for LLDs may be found in the C source code
+or in the same directory as the C source code. For example to find a URL
about the USB mass storage driver see the
/usr/src/linux/drivers/usb/storage directory.
diff --git a/MAINTAINERS b/MAINTAINERS
index 9d7a488b188d..39c0517bbd23 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5828,10 +5828,9 @@ F: include/uapi/misc/cxl.h
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
M: Manoj N. Kumar <manoj@linux.ibm.com>
-M: Matthew R. Ochs <mrochs@linux.ibm.com>
M: Uma Krishnan <ukrishn@linux.ibm.com>
L: linux-scsi@vger.kernel.org
-S: Supported
+S: Obsolete
F: Documentation/arch/powerpc/cxlflash.rst
F: drivers/scsi/cxlflash/
F: include/uapi/scsi/cxlflash_ioctl.h
diff --git a/block/blk-settings.c b/block/blk-settings.c
index ebba05a2bc7f..a7fe8e90240a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -283,72 +283,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
EXPORT_SYMBOL_GPL(queue_limits_set);
/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @bounce: bounce limit to enforce
- *
- * Description:
- * Force bouncing for ISA DMA ranges or highmem.
- *
- * DEPRECATED, don't use in new code.
- **/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-{
- q->limits.bounce = bounce;
-}
-EXPORT_SYMBOL(blk_queue_bounce_limit);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- * Enables a low level driver to set a hard upper limit,
- * max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the capabilities of the I/O
- * controller.
- *
- * max_dev_sectors is a hard limit imposed by the storage device for
- * READ/WRITE requests. It is set by the disk driver.
- *
- * max_sectors is a soft limit imposed by the block layer for
- * filesystem type requests. This value can be overridden on a
- * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
- * The soft limit can not exceed max_hw_sectors.
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
- struct queue_limits *limits = &q->limits;
- unsigned int max_sectors;
-
- if ((max_hw_sectors << 9) < PAGE_SIZE) {
- max_hw_sectors = 1 << (PAGE_SHIFT - 9);
- pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
- }
-
- max_hw_sectors = round_down(max_hw_sectors,
- limits->logical_block_size >> SECTOR_SHIFT);
- limits->max_hw_sectors = max_hw_sectors;
-
- max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
-
- if (limits->max_user_sectors)
- max_sectors = min(max_sectors, limits->max_user_sectors);
- else
- max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
-
- max_sectors = round_down(max_sectors,
- limits->logical_block_size >> SECTOR_SHIFT);
- limits->max_sectors = max_sectors;
-
- if (!q->disk)
- return;
- q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
-}
-EXPORT_SYMBOL(blk_queue_max_hw_sectors);
-
-/**
* blk_queue_chunk_sectors - set size of the chunk for this queue
* @q: the request queue for the device
* @chunk_sectors: chunk sectors in the usual 512b unit
@@ -443,65 +377,6 @@ void blk_queue_max_zone_append_sectors(struct request_queue *q,
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
/**
- * blk_queue_max_segments - set max hw segments for a request for this queue
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * hw data segments in a request.
- **/
-void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-{
- if (!max_segments) {
- max_segments = 1;
- pr_info("%s: set to minimum %u\n", __func__, max_segments);
- }
-
- q->limits.max_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_segments);
-
-/**
- * blk_queue_max_discard_segments - set max segments for discard requests
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * segments in a discard request.
- **/
-void blk_queue_max_discard_segments(struct request_queue *q,
- unsigned short max_segments)
-{
- q->limits.max_discard_segments = max_segments;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
-
-/**
- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
- * @q: the request queue for the device
- * @max_size: max size of segment in bytes
- *
- * Description:
- * Enables a low level driver to set an upper limit on the size of a
- * coalesced segment
- **/
-void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-{
- if (max_size < PAGE_SIZE) {
- max_size = PAGE_SIZE;
- pr_info("%s: set to minimum %u\n", __func__, max_size);
- }
-
- /* see blk_queue_virt_boundary() for the explanation */
- WARN_ON_ONCE(q->limits.virt_boundary_mask);
-
- q->limits.max_segment_size = max_size;
-}
-EXPORT_SYMBOL(blk_queue_max_segment_size);
-
-/**
* blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
* @size: the logical block size, in bytes
@@ -667,29 +542,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
}
EXPORT_SYMBOL(blk_limits_io_opt);
-/**
- * blk_queue_io_opt - set optimal request size for the queue
- * @q: the request queue for the device
- * @opt: optimal request size in bytes
- *
- * Description:
- * Storage devices may report an optimal I/O size, which is the
- * device's preferred unit for sustained I/O. This is rarely reported
- * for disk drives. For RAID arrays it is usually the stripe width or
- * the internal track size. A properly aligned multiple of
- * optimal_io_size is the preferred request size for workloads where
- * sustained throughput is desired.
- */
-void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-{
- blk_limits_io_opt(&q->limits, opt);
- if (!q->disk)
- return;
- q->disk->bdi->ra_pages =
- max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
-}
-EXPORT_SYMBOL(blk_queue_io_opt);
-
static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
@@ -940,81 +792,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
- * blk_queue_segment_boundary - set boundary rules for segment merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-{
- if (mask < PAGE_SIZE - 1) {
- mask = PAGE_SIZE - 1;
- pr_info("%s: set to minimum %lx\n", __func__, mask);
- }
-
- q->limits.seg_boundary_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_segment_boundary);
-
-/**
- * blk_queue_virt_boundary - set boundary rules for bio merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-{
- q->limits.virt_boundary_mask = mask;
-
- /*
- * Devices that require a virtual boundary do not support scatter/gather
- * I/O natively, but instead require a descriptor list entry for each
- * page (which might not be idential to the Linux PAGE_SIZE). Because
- * of that they are not limited by our notion of "segment size".
- */
- if (mask)
- q->limits.max_segment_size = UINT_MAX;
-}
-EXPORT_SYMBOL(blk_queue_virt_boundary);
-
-/**
- * blk_queue_dma_alignment - set dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * set required memory and length alignment for direct dma transactions.
- * this is used when building direct io requests for the queue.
- *
- **/
-void blk_queue_dma_alignment(struct request_queue *q, int mask)
-{
- q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_alignment);
-
-/**
- * blk_queue_update_dma_alignment - update dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * update required memory and length alignment for direct dma transactions.
- * If the requested alignment is larger than the current alignment, then
- * the current queue alignment is updated to the new value, otherwise it
- * is left alone. The design of this is to allow multiple objects
- * (driver, device, transport etc) to set their respective
- * alignments without having them interfere.
- *
- **/
-void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-{
- BUG_ON(mask > PAGE_SIZE);
-
- if (mask > q->limits.dma_alignment)
- q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-
-/**
* blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
* @depth: queue depth
@@ -1052,28 +829,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
/**
- * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
- * @q: the request queue for the device
- * @dev: the device pointer for dma
- *
- * Tell the block layer about merging the segments by dma map of @q.
- */
-bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev)
-{
- unsigned long boundary = dma_get_merge_boundary(dev);
-
- if (!boundary)
- return false;
-
- /* No need to update max_segment_size. see blk_queue_virt_boundary() */
- blk_queue_virt_boundary(q, boundary);
-
- return true;
-}
-EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
-
-/**
* disk_set_zoned - inidicate a zoned device
* @disk: gendisk to configure
*/
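
With the per-field setters above deleted, limits on a live queue are now updated as a whole, for example via queue_limits_set() (still exported at the top of this file) or the queue_limits_start_update()/queue_limits_commit_update() pair. A hedged sketch of what a caller might look like (illustrative; not taken from this series):

    /* Atomically tighten two limits on an existing queue. */
    static int foo_shrink_segments(struct request_queue *q)
    {
            struct queue_limits lim;

            lim = queue_limits_start_update(q);     /* serializes updaters */
            lim.max_segments = 128;
            lim.max_segment_size = 65536;
            /* validates the full set, applies it, and drops the lock */
            return queue_limits_commit_update(q, &lim);
    }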
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index bcc7dee6abce..ee738d129a9f 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -354,12 +354,14 @@ static const struct blk_mq_ops bsg_mq_ops = {
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
* @name: device to give bsg device
+ * @lim: queue limits for the bsg queue
* @job_fn: bsg job handler
* @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
+ struct queue_limits *lim, bsg_job_fn *job_fn,
+ bsg_timeout_fn *timeout, int dd_job_size)
{
struct bsg_set *bset;
struct blk_mq_tag_set *set;
@@ -383,7 +385,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
if (blk_mq_alloc_tag_set(set))
goto out_tag_set;
- q = blk_mq_alloc_queue(set, NULL, NULL);
+ q = blk_mq_alloc_queue(set, lim, NULL);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out_queue;
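
Callers of bsg_setup_queue() now hand over their queue limits at allocation time instead of fixing up the queue afterwards; passing a NULL @lim keeps the block layer defaults. A hedged sketch of a transport-class caller (names and values are illustrative; the real conversions are in the scsi_transport_fc/iscsi/sas files listed in the diffstat):

    struct queue_limits lim = {
            .max_hw_sectors = 1024,         /* illustrative value */
    };
    struct request_queue *q;

    q = bsg_setup_queue(dev, "foo_bsg", &lim, foo_bsg_dispatch,
                        foo_bsg_timeout, 0);
    if (IS_ERR(q))
            return PTR_ERR(q);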
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 344c87210d8f..8f40f75ba08c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -397,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 0fb1934875f2..9e047bf912b1 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -848,80 +848,143 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
ata_scsi_lpm_show, ata_scsi_lpm_store);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
-static ssize_t ata_ncq_prio_supported_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_supported - Check if device supports NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @supported: Address of a boolean to store the result
+ *
+ * Helper to check if device supports NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @supported
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_supported;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
- spin_unlock_irq(ap->lock);
+ *supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_supported);
+
+static ssize_t ata_ncq_prio_supported_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool supported;
+ int rc;
+
+ rc = ata_ncq_prio_supported(ap, sdev, &supported);
+ if (rc)
+ return rc;
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
+ return sysfs_emit(buf, "%d\n", supported);
}
DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
-static ssize_t ata_ncq_prio_enable_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_enabled - Check if NCQ Priority is enabled
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enabled: Address of a boolean to store the result
+ *
+ * Helper to check if NCQ Priority feature is enabled.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @enabled
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_enable;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
- spin_unlock_irq(ap->lock);
+ *enabled = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
+ spin_unlock_irqrestore(ap->lock, flags);
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
+ return rc;
}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enabled);
-static ssize_t ata_ncq_prio_enable_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap;
- struct ata_device *dev;
- long int input;
- int rc = 0;
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enabled;
+ int rc;
- rc = kstrtol(buf, 10, &input);
+ rc = ata_ncq_prio_enabled(ap, sdev, &enabled);
if (rc)
return rc;
- if ((input < 0) || (input > 1))
- return -EINVAL;
- ap = ata_shost_to_port(sdev->host);
- dev = ata_scsi_find_dev(ap, sdev);
- if (unlikely(!dev))
- return -ENODEV;
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+
+/**
+ * ata_ncq_prio_enable - Enable/disable NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enable: true - enable NCQ Priority, false - disable NCQ Priority
+ *
+ * Helper to enable/disable NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @enabled
+ * * %-ENODEV - Failed to find the ATA device
+ * * %-EINVAL - NCQ Priority is not supported or CDL is enabled
+ */
+int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable)
+{
+ struct ata_device *dev;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(ap->lock, flags);
- spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
rc = -EINVAL;
goto unlock;
}
- if (input) {
+ if (enable) {
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
ata_dev_err(dev,
"CDL must be disabled to enable NCQ priority\n");
@@ -934,9 +997,30 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
}
unlock:
- spin_unlock_irq(ap->lock);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enable);
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enable;
+ int rc;
+
+ rc = kstrtobool(buf, &enable);
+ if (rc)
+ return rc;
+
+ rc = ata_ncq_prio_enable(ap, sdev, enable);
+ if (rc)
+ return rc;
- return rc ? rc : len;
+ return len;
}
DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
@@ -1170,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
- * ata_sas_slave_configure - Default slave_config routine for libata devices
+ * ata_sas_device_configure - Default device_configure routine for libata
+ * devices
* @sdev: SCSI device to configure
+ * @lim: queue limits
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/
-int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
- return ata_scsi_dev_config(sdev, ap->link.device);
+ return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+EXPORT_SYMBOL_GPL(ata_sas_device_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
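
The three helpers added above (ata_ncq_prio_supported(), ata_ncq_prio_enabled() and ata_ncq_prio_enable()) factor the NCQ priority logic out of the sysfs handlers so that other LLDDs can reuse it; the libsas changes in this series appear to be the intended consumers. A hedged usage sketch (illustrative only):

    bool supported;
    int rc;

    rc = ata_ncq_prio_supported(ap, sdev, &supported);
    if (rc)
            return rc;
    if (supported)
            rc = ata_ncq_prio_enable(ap, sdev, true);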
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e954976891a9..cdf29b178ddc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1021,7 +1021,8 @@ bool ata_scsi_dma_need_drain(struct request *rq)
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
int depth = 1;
@@ -1031,7 +1032,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
/* configure max sectors */
dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
- blk_queue_max_hw_sectors(q, dev->max_sectors);
+ lim->max_hw_sectors = dev->max_sectors;
if (dev->class == ATA_DEV_ATAPI) {
sdev->sector_size = ATA_SECT_SIZE;
@@ -1040,7 +1041,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
/* make room for appending the drain */
- blk_queue_max_segments(q, queue_max_segments(q) - 1);
+ lim->max_segments--;
sdev->dma_drain_len = ATAPI_MAX_DRAIN;
sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
@@ -1077,7 +1078,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
- blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
+ lim->dma_alignment = sdev->sector_size - 1;
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1131,8 +1132,9 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
/**
- * ata_scsi_slave_config - Set SCSI device attributes
+ * ata_scsi_device_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
+ * @lim: queue limits
*
* This is called before we actually start reading
* and writing to the device, to configure certain
@@ -1142,17 +1144,18 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_config(struct scsi_device *sdev)
+int ata_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
if (dev)
- return ata_scsi_dev_config(sdev, dev);
+ return ata_scsi_dev_config(sdev, lim, dev);
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 1a9881dc2aa1..38ce13b55474 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -131,7 +131,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 88b2e9817f49..817838e2f70e 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -796,7 +796,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_slave_config(struct scsi_device *sdev)
+static int pata_macio_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -805,7 +806,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
int rc;
/* First call original */
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (rc)
return rc;
@@ -814,7 +815,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* OHare has issues with non cache aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
- blk_queue_update_dma_alignment(sdev->request_queue, 31);
+ lim->dma_alignment = 31;
blk_queue_update_dma_pad(sdev->request_queue, 31);
/* Tell the world about it */
@@ -829,7 +830,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* Shasta and K2 seem to have "issues" with reads ... */
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
/* Allright these are bad, apply restrictions */
- blk_queue_update_dma_alignment(sdev->request_queue, 15);
+ lim->dma_alignment = 15;
blk_queue_update_dma_pad(sdev->request_queue, 15);
/* We enable MWI and hack cache line size directly here, this
@@ -918,7 +919,7 @@ static const struct scsi_host_template pata_macio_sht = {
* use 64K minus 256
*/
.max_segment_size = MAX_DBDMA_SEG,
- .slave_configure = pata_macio_slave_config,
+ .device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9bec0aee92e0..05c905827dc5 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = {
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations mv5_ops = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0a0cee755bde..36d99043ef50 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,7 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -318,7 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -380,7 +382,7 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .slave_configure = nv_adma_slave_config,
+ .device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -391,7 +393,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .slave_configure = nv_swncq_slave_config,
+ .device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -661,7 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_slave_config(struct scsi_device *sdev)
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -673,7 +676,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -740,8 +743,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
}
- blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
- blk_queue_max_segments(sdev->request_queue, sg_tablesize);
+ lim->seg_boundary_mask = segment_boundary;
+ lim->max_segments = sg_tablesize;
ata_port_info(ap,
"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
(unsigned long long)*ap->host->dev->dma_mask,
@@ -1868,7 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_slave_config(struct scsi_device *sdev)
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1878,7 +1882,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 142e70bfc498..72c03cbdaff4 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -381,7 +381,7 @@ static const struct scsi_host_template sil24_sht = {
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations sil24_ops = {
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index e779d866022b..827dee0f57dd 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1500,19 +1500,14 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
sdev->allow_restart = 1;
- /*
- * SBP-2 does not require any alignment, but we set it anyway
- * for compatibility with earlier versions of this driver.
- */
- blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
return 0;
}
-static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+static int sbp2_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1538,7 +1533,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->start_stop_pwr_cond = 1;
if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
- blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
+ lim->max_hw_sectors = 128 * 1024 / 512;
return 0;
}
@@ -1596,7 +1591,7 @@ static const struct scsi_host_template scsi_driver_template = {
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
.slave_alloc = sbp2_scsi_slave_alloc,
- .slave_configure = sbp2_scsi_slave_configure,
+ .device_configure = sbp2_scsi_device_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c459f709107b..a3c17c4fe69c 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -129,6 +129,7 @@ static const struct scsi_host_template mptfc_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
+ .dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
};
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 0f80c840afc3..a0bcb0864ecd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2020,6 +2020,7 @@ static const struct scsi_host_template mptsas_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
+ .dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
.no_write_same = 1,
};
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 9080a73b4ea6..6c3f25cc33ff 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2438,8 +2438,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
"tagged %d, simple %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags));
- blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
-
return 0;
}
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 6c5920db1e9d..574b882c9a85 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -843,6 +843,7 @@ static const struct scsi_host_template mptspi_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
+ .dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c278f8893042..d39e198fe8db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1351,7 +1351,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_rev << 8) |
(params->drv_eng);
strscpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ sizeof(drv_version.name));
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 180a008d38ea..2f16f543079b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4561,9 +4561,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
len_to_track_end = 0;
/*
* A tidaw can address 4k of memory, but must not cross page boundaries
- * We can let the block layer handle this by setting
- * blk_queue_segment_boundary to page boundaries and
- * blk_max_segment_size to page size when setting up the request queue.
+ * We can let the block layer handle this by setting seg_boundary_mask
+ * to page boundaries and max_segment_size to page size when setting up
+ * the request queue.
* For write requests, a TIDAW must not cross track boundaries, because
* we have to set the CBC flag on the last tidaw for each track.
*/
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 3d9c56ac8224..9e77b8e1ea7c 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -2631,7 +2631,6 @@ static void FPT_sres(u32 port, unsigned char p_card,
WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00);
our_target = (unsigned char)(RD_HARPOON(port + hp_select_id) >> 4);
- currTar_Info = &FPT_sccbMgrTbl[p_card][our_target];
msgRetryCount = 0;
do {
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 634f2f501c6c..065db86d6021 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -53,7 +53,7 @@ config SCSI_ESP_PIO
config SCSI_NETLINK
bool
- default n
+ default n
depends on NET
config SCSI_PROC_FS
@@ -327,7 +327,7 @@ config ISCSI_TCP
config ISCSI_BOOT_SYSFS
tristate "iSCSI Boot Sysfs Interface"
- default n
+ default n
help
This option enables support for exposing iSCSI boot information
via sysfs to userspace. If you wish to export this information,
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 378906f77909..ad39797890e5 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -295,7 +295,13 @@ static void __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
}
-static struct platform_driver amiga_a3000_scsi_driver = {
+/*
+ * amiga_a3000_scsi_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver amiga_a3000_scsi_driver __refdata = {
.remove_new = __exit_p(amiga_a3000_scsi_remove),
.driver = {
.name = "amiga-a3000-scsi",
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index e435fc06a624..d9103adc87fe 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -108,7 +108,13 @@ static void __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
}
-static struct platform_driver amiga_a4000t_scsi_driver = {
+/*
+ * amiga_a4000t_scsi_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver amiga_a4000t_scsi_driver __refdata = {
.remove_new = __exit_p(amiga_a4000t_scsi_remove),
.driver = {
.name = "amiga-a4000t-scsi",
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 055adb349b0e..83f16fc14d96 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -746,6 +746,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
/* need to have host registered before triggering any interrupt */
list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);
+ shpnt->no_highmem = true;
shpnt->io_port = setup->io_port;
shpnt->n_io_port = IO_RANGE;
shpnt->irq = setup->irq;
@@ -2940,12 +2941,6 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
return 0;
}
-static int aha152x_adjust_queue(struct scsi_device *device)
-{
- blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
- return 0;
-}
-
static const struct scsi_host_template aha152x_driver_template = {
.module = THIS_MODULE,
.name = AHA152X_REVID,
@@ -2961,7 +2956,6 @@ static const struct scsi_host_template aha152x_driver_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
- .slave_alloc = aha152x_adjust_queue,
.cmd_size = sizeof(struct aha152x_cmd_priv),
};
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 4bc53eec4c83..863f0932ef59 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -8,79 +8,80 @@ config SCSI_AIC79XX
depends on PCI && HAS_IOPORT && SCSI
select SCSI_SPI_ATTRS
help
- This driver supports all of Adaptec's Ultra 320 PCI-X
- based SCSI controllers.
+ This driver supports all of Adaptec's Ultra 320 PCI-X
+ based SCSI controllers.
config AIC79XX_CMDS_PER_DEVICE
int "Maximum number of TCQ commands per device"
depends on SCSI_AIC79XX
default "32"
help
- Specify the number of commands you would like to allocate per SCSI
- device when Tagged Command Queueing (TCQ) is enabled on that device.
+ Specify the number of commands you would like to allocate per SCSI
+ device when Tagged Command Queueing (TCQ) is enabled on that device.
- This is an upper bound value for the number of tagged transactions
- to be used for any device. The aic7xxx driver will automatically
- vary this number based on device behavior. For devices with a
- fixed maximum, the driver will eventually lock to this maximum
- and display a console message indicating this value.
+ This is an upper bound value for the number of tagged transactions
+ to be used for any device. The aic7xxx driver will automatically
+ vary this number based on device behavior. For devices with a
+ fixed maximum, the driver will eventually lock to this maximum
+ and display a console message indicating this value.
- Due to resource allocation issues in the Linux SCSI mid-layer, using
- a high number of commands per device may result in memory allocation
- failures when many devices are attached to the system. For this reason,
- the default is set to 32. Higher values may result in higher performance
- on some devices. The upper bound is 253. 0 disables tagged queueing.
+ Due to resource allocation issues in the Linux SCSI mid-layer, using
+ a high number of commands per device may result in memory allocation
+ failures when many devices are attached to the system. For this
+ reason, the default is set to 32. Higher values may result in higher
+ performance on some devices. The upper bound is 253. 0 disables
+ tagged queueing.
- Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic79xx.rst for details.
+ Per device tag depth can be controlled via the kernel command line
+ "tag_info" option. See Documentation/scsi/aic79xx.rst for details.
config AIC79XX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
depends on SCSI_AIC79XX
default "5000"
help
- The number of milliseconds to delay after an initial bus reset.
- The bus settle delay following all error recovery actions is
- dictated by the SCSI layer and is not affected by this value.
+ The number of milliseconds to delay after an initial bus reset.
+ The bus settle delay following all error recovery actions is
+ dictated by the SCSI layer and is not affected by this value.
- Default: 5000 (5 seconds)
+ Default: 5000 (5 seconds)
config AIC79XX_BUILD_FIRMWARE
bool "Build Adapter Firmware with Kernel Build"
depends on SCSI_AIC79XX && !PREVENT_FIRMWARE_BUILD
help
- This option should only be enabled if you are modifying the firmware
- source to the aic79xx driver and wish to have the generated firmware
- include files updated during a normal kernel build. The assembler
- for the firmware requires lex and yacc or their equivalents, as well
- as the db v1 library. You may have to install additional packages
- or modify the assembler Makefile or the files it includes if your
- build environment is different than that of the author.
+ This option should only be enabled if you are modifying the firmware
+ source to the aic79xx driver and wish to have the generated firmware
+ include files updated during a normal kernel build. The assembler
+ for the firmware requires lex and yacc or their equivalents, as well
+ as the db v1 library. You may have to install additional packages
+ or modify the assembler Makefile or the files it includes if your
+ build environment is different than that of the author.
config AIC79XX_DEBUG_ENABLE
bool "Compile in Debugging Code"
depends on SCSI_AIC79XX
default y
help
- Compile in aic79xx debugging code that can be useful in diagnosing
- driver errors.
+ Compile in aic79xx debugging code that can be useful in diagnosing
+ driver errors.
config AIC79XX_DEBUG_MASK
int "Debug code enable mask (16383 for all debugging)"
depends on SCSI_AIC79XX
default "0"
help
- Bit mask of debug options that is only valid if the
- CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask
- are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the
- variable ahd_debug in that file to find them.
+ Bit mask of debug options that is only valid if the
+ CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask
+ are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the
+ variable ahd_debug in that file to find them.
config AIC79XX_REG_PRETTY_PRINT
bool "Decode registers during diagnostics"
depends on SCSI_AIC79XX
default y
help
- Compile in register value tables for the output of expanded register
- contents in diagnostics. This make it much easier to understand debug
- output without having to refer to a data book and/or the aic7xxx.reg
- file.
+ Compile in register value tables for the output of expanded register
+ contents in diagnostics. This make it much easier to understand debug
+ output without having to refer to a data book and/or the aic7xxx.reg
+ file.
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index f0425145a5f4..8f87f2d8ba9f 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -8,84 +8,85 @@ config SCSI_AIC7XXX
depends on (PCI || EISA) && HAS_IOPORT && SCSI
select SCSI_SPI_ATTRS
help
- This driver supports all of Adaptec's Fast through Ultra 160 PCI
- based SCSI controllers as well as the aic7770 based EISA and VLB
- SCSI controllers (the 274x and 284x series). For AAA and ARO based
- configurations, only SCSI functionality is provided.
+ This driver supports all of Adaptec's Fast through Ultra 160 PCI
+ based SCSI controllers as well as the aic7770 based EISA and VLB
+ SCSI controllers (the 274x and 284x series). For AAA and ARO based
+ configurations, only SCSI functionality is provided.
- To compile this driver as a module, choose M here: the
- module will be called aic7xxx.
+ To compile this driver as a module, choose M here: the
+ module will be called aic7xxx.
config AIC7XXX_CMDS_PER_DEVICE
int "Maximum number of TCQ commands per device"
depends on SCSI_AIC7XXX
default "32"
help
- Specify the number of commands you would like to allocate per SCSI
- device when Tagged Command Queueing (TCQ) is enabled on that device.
+ Specify the number of commands you would like to allocate per SCSI
+ device when Tagged Command Queueing (TCQ) is enabled on that device.
- This is an upper bound value for the number of tagged transactions
- to be used for any device. The aic7xxx driver will automatically
- vary this number based on device behavior. For devices with a
- fixed maximum, the driver will eventually lock to this maximum
- and display a console message indicating this value.
+ This is an upper bound value for the number of tagged transactions
+ to be used for any device. The aic7xxx driver will automatically
+ vary this number based on device behavior. For devices with a
+ fixed maximum, the driver will eventually lock to this maximum
+ and display a console message indicating this value.
- Due to resource allocation issues in the Linux SCSI mid-layer, using
- a high number of commands per device may result in memory allocation
- failures when many devices are attached to the system. For this reason,
- the default is set to 32. Higher values may result in higher performance
- on some devices. The upper bound is 253. 0 disables tagged queueing.
+ Due to resource allocation issues in the Linux SCSI mid-layer, using
+ a high number of commands per device may result in memory allocation
+ failures when many devices are attached to the system. For this
+ reason, the default is set to 32. Higher values may result in higher
+ performance on some devices. The upper bound is 253. 0 disables tagged
+ queueing.
- Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic7xxx.rst for details.
+ Per device tag depth can be controlled via the kernel command line
+ "tag_info" option. See Documentation/scsi/aic7xxx.rst for details.
config AIC7XXX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
depends on SCSI_AIC7XXX
default "5000"
help
- The number of milliseconds to delay after an initial bus reset.
- The bus settle delay following all error recovery actions is
- dictated by the SCSI layer and is not affected by this value.
+ The number of milliseconds to delay after an initial bus reset.
+ The bus settle delay following all error recovery actions is
+ dictated by the SCSI layer and is not affected by this value.
- Default: 5000 (5 seconds)
+ Default: 5000 (5 seconds)
config AIC7XXX_BUILD_FIRMWARE
bool "Build Adapter Firmware with Kernel Build"
depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD
help
- This option should only be enabled if you are modifying the firmware
- source to the aic7xxx driver and wish to have the generated firmware
- include files updated during a normal kernel build. The assembler
- for the firmware requires lex and yacc or their equivalents, as well
- as the db v1 library. You may have to install additional packages
- or modify the assembler Makefile or the files it includes if your
- build environment is different than that of the author.
+ This option should only be enabled if you are modifying the firmware
+ source to the aic7xxx driver and wish to have the generated firmware
+ include files updated during a normal kernel build. The assembler
+ for the firmware requires lex and yacc or their equivalents, as well
+ as the db v1 library. You may have to install additional packages
+ or modify the assembler Makefile or the files it includes if your
+ build environment is different than that of the author.
config AIC7XXX_DEBUG_ENABLE
bool "Compile in Debugging Code"
depends on SCSI_AIC7XXX
default y
help
- Compile in aic7xxx debugging code that can be useful in diagnosing
- driver errors.
+ Compile in aic7xxx debugging code that can be useful in diagnosing
+ driver errors.
config AIC7XXX_DEBUG_MASK
- int "Debug code enable mask (2047 for all debugging)"
- depends on SCSI_AIC7XXX
- default "0"
- help
- Bit mask of debug options that is only valid if the
- CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask
- are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
- variable ahc_debug in that file to find them.
+ int "Debug code enable mask (2047 for all debugging)"
+ depends on SCSI_AIC7XXX
+ default "0"
+ help
+ Bit mask of debug options that is only valid if the
+ CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask
+	  are defined in drivers/scsi/aic7xxx/aic7xxx.h - search for the
+ variable ahc_debug in that file to find them.
config AIC7XXX_REG_PRETTY_PRINT
- bool "Decode registers during diagnostics"
- depends on SCSI_AIC7XXX
+ bool "Decode registers during diagnostics"
+ depends on SCSI_AIC7XXX
default y
- help
- Compile in register value tables for the output of expanded register
- contents in diagnostics. This make it much easier to understand debug
- output without having to refer to a data book and/or the aic7xxx.reg
- file.
+ help
+ Compile in register value tables for the output of expanded register
+	  contents in diagnostics. This makes it much easier to understand debug
+ output without having to refer to a data book and/or the aic7xxx.reg
+ file.
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 8a3340d8d7ad..538a5867e8ab 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -14,6 +14,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
+#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include "aic94xx.h"
@@ -24,6 +25,7 @@
/* The format is "version.release.patchlevel" */
#define ASD_DRIVER_VERSION "1.0.3"
+#define DRV_NAME "aic94xx"
static int use_msi = 0;
module_param_named(use_msi, use_msi, int, S_IRUGO);
@@ -34,32 +36,16 @@ MODULE_PARM_DESC(use_msi, "\n"
static struct scsi_transport_template *aic94xx_transport_template;
static int asd_scan_finished(struct Scsi_Host *, unsigned long);
static void asd_scan_start(struct Scsi_Host *);
+static const struct attribute_group *asd_sdev_groups[];
static const struct scsi_host_template aic94xx_sht = {
- .module = THIS_MODULE,
- /* .name is initialized */
- .name = "aic94xx",
- .queuecommand = sas_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
- .target_alloc = sas_target_alloc,
- .slave_configure = sas_slave_configure,
+ LIBSAS_SHT_BASE
.scan_finished = asd_scan_finished,
.scan_start = asd_scan_start,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
.can_queue = 1,
- .this_id = -1,
.sg_tablesize = SG_ALL,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
- .slave_alloc = sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sas_ioctl,
-#endif
.track_queue_depth = 1,
+ .sdev_groups = asd_sdev_groups,
};
static int asd_map_memio(struct asd_ha_struct *asd_ha)
@@ -951,6 +937,11 @@ static void asd_remove_driver_attrs(struct device_driver *driver)
driver_remove_file(driver, &driver_attr_version);
}
+static const struct attribute_group *asd_sdev_groups[] = {
+ &sas_ata_sdev_attr_group,
+ NULL
+};
+
static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_dev_found = asd_dev_found,
.lldd_dev_gone = asd_dev_gone,
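
The template conversion above is the recurring pattern in this series: boilerplate sas_* callbacks move into the LIBSAS_SHT_BASE macro, and the per-device sysfs groups are forward-declared so the template can reference an array defined next to the attributes. Inferred purely from the initializers this hunk deletes, the base macro plausibly covers something like the following (a sketch, not the exact include/scsi/libsas.h definition; compat_ioctl handling elided):

    #define EXAMPLE_LIBSAS_SHT_BASE					\
    	.module			= THIS_MODULE,			\
    	.queuecommand		= sas_queuecommand,		\
    	.dma_need_drain		= ata_scsi_dma_need_drain,	\
    	.target_alloc		= sas_target_alloc,		\
    	.slave_configure	= sas_slave_configure,		\
    	.change_queue_depth	= sas_change_queue_depth,	\
    	.bios_param		= sas_bios_param,		\
    	.this_id		= -1,				\
    	.eh_device_reset_handler = sas_eh_device_reset_handler,	\
    	.eh_target_reset_handler = sas_eh_target_reset_handler,	\
    	.slave_alloc		= sas_slave_alloc,		\
    	.target_destroy		= sas_target_destroy,		\
    	.ioctl			= sas_ioctl,
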
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index d99e70914817..742625ac7d99 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -878,7 +878,13 @@ static void __exit atari_scsi_remove(struct platform_device *pdev)
atari_stram_free(atari_dma_buffer);
}
-static struct platform_driver atari_scsi_driver = {
+/*
+ * atari_scsi_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver atari_scsi_driver __refdata = {
.remove_new = __exit_p(atari_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
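
The __refdata annotation above is a generic pattern worth spelling out: a remove callback placed in .exit.text and referenced from a driver struct in .data would normally trigger a modpost section mismatch, but the reference is harmless when the driver is registered via module_platform_driver_probe() and therefore can never be unbound at runtime. A sketch with a hypothetical driver:

    #include <linux/platform_device.h>

    static int __init foo_probe(struct platform_device *pdev)
    {
    	return 0;
    }

    static void __exit foo_remove(struct platform_device *pdev)
    {
    }

    /* __refdata: the .data -> .exit.text reference is safe because a
     * module_platform_driver_probe() driver cannot be unbound. */
    static struct platform_driver foo_driver __refdata = {
    	.remove_new = __exit_p(foo_remove),
    	.driver = {
    		.name = "foo",
    	},
    };
    module_platform_driver_probe(foo_driver, foo_probe);
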
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 52db147d9979..f6dd077d47c9 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -250,7 +250,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -317,7 +317,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
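
Both write handlers go on to parse kern_buf with sscanf(), which requires a NUL-terminated string; memdup_user() copies exactly nbytes with no terminator, so a crafted write could make the parser read past the end of the allocation. memdup_user_nul() allocates nbytes + 1 and appends '\0'. The fixed shape, with illustrative names:

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static ssize_t example_debugfs_write(struct file *file,
    					 const char __user *buf,
    					 size_t nbytes, loff_t *ppos)
    {
    	unsigned int addr, len;
    	char *kern_buf;

    	kern_buf = memdup_user_nul(buf, nbytes);	/* copy + NUL-terminate */
    	if (IS_ERR(kern_buf))
    		return PTR_ERR(kern_buf);

    	/* Safe only because kern_buf is now a proper C string. */
    	if (sscanf(kern_buf, "%x:%x", &addr, &len) != 2) {
    		kfree(kern_buf);
    		return -EINVAL;
    	}

    	kfree(kern_buf);
    	return nbytes;
    }
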
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d91659811eb3..eb3209103312 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -128,10 +128,8 @@ retry_ofld:
BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
"retry ofld..%d\n", i++);
msleep_interruptible(1000);
- if (i > 3) {
- i = 0;
+ if (i > 3)
goto ofld_err;
- }
goto retry_ofld;
}
goto ofld_err;
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index d649b7a2a879..9a3f2ed050bd 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1185,9 +1185,6 @@ static struct pci_error_handlers csio_err_handler = {
static struct pci_driver csio_pci_driver = {
.name = KBUILD_MODNAME,
- .driver = {
- .owner = THIS_MODULE,
- },
.id_table = csio_pci_tbl,
.probe = csio_probe_one,
.remove = csio_remove_one,
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index e0e15b44a2e6..52405c6462f8 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -216,7 +216,7 @@ void cxlflash_term_global_luns(void)
/**
* cxlflash_manage_lun() - handles LUN management activities
* @sdev: SCSI device associated with LUN.
- * @manage: Manage ioctl data structure.
+ * @arg: Manage ioctl data structure.
*
* This routine is used to notify the driver about a LUN's WWID and associate
* SCSI devices (sdev) with a global LUN instance. Additionally it serves to
@@ -224,9 +224,9 @@ void cxlflash_term_global_luns(void)
*
* Return: 0 on success, -errno on failure
*/
-int cxlflash_manage_lun(struct scsi_device *sdev,
- struct dk_cxlflash_manage_lun *manage)
+int cxlflash_manage_lun(struct scsi_device *sdev, void *arg)
{
+ struct dk_cxlflash_manage_lun *manage = arg;
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = NULL;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index e8382cc5cf23..e4b45b7e3277 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3280,13 +3280,13 @@ static char *decode_hioctl(unsigned int cmd)
/**
* cxlflash_lun_provision() - host LUN provisioning handler
* @cfg: Internal structure associated with the host.
- * @lunprov: Kernel copy of userspace ioctl data structure.
+ * @arg: Kernel copy of userspace ioctl data structure.
*
* Return: 0 on success, -errno on failure
*/
-static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
- struct ht_cxlflash_lun_provision *lunprov)
+static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, void *arg)
{
+ struct ht_cxlflash_lun_provision *lunprov = arg;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
struct sisl_ioarcb rcb;
@@ -3371,16 +3371,16 @@ out:
/**
* cxlflash_afu_debug() - host AFU debug handler
* @cfg: Internal structure associated with the host.
- * @afu_dbg: Kernel copy of userspace ioctl data structure.
+ * @arg: Kernel copy of userspace ioctl data structure.
*
* For debug requests requiring a data buffer, always provide an aligned
* (cache line) buffer to the AFU to appease any alignment requirements.
*
* Return: 0 on success, -errno on failure
*/
-static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
- struct ht_cxlflash_afu_debug *afu_dbg)
+static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, void *arg)
{
+ struct ht_cxlflash_afu_debug *afu_dbg = arg;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
struct sisl_ioarcb rcb;
@@ -3494,10 +3494,8 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
size_t size;
hioctl ioctl;
} ioctl_tbl[] = { /* NOTE: order matters here */
- { sizeof(struct ht_cxlflash_lun_provision),
- (hioctl)cxlflash_lun_provision },
- { sizeof(struct ht_cxlflash_afu_debug),
- (hioctl)cxlflash_afu_debug },
+ { sizeof(struct ht_cxlflash_lun_provision), cxlflash_lun_provision },
+ { sizeof(struct ht_cxlflash_afu_debug), cxlflash_afu_debug },
};
/* Hold read semaphore so we can drain if needed */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index e1b55b03e812..2d356fe2457a 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -729,8 +729,7 @@ out:
return rc;
}
-int cxlflash_disk_release(struct scsi_device *sdev,
- struct dk_cxlflash_release *release)
+int cxlflash_disk_release(struct scsi_device *sdev, void *release)
{
return _cxlflash_disk_release(sdev, NULL, release);
}
@@ -955,8 +954,7 @@ out:
return rc;
}
-static int cxlflash_disk_detach(struct scsi_device *sdev,
- struct dk_cxlflash_detach *detach)
+static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
{
return _cxlflash_disk_detach(sdev, NULL, detach);
}
@@ -1305,7 +1303,7 @@ retry:
/**
* cxlflash_disk_attach() - attach a LUN to a context
* @sdev: SCSI device associated with LUN.
- * @attach: Attach ioctl data structure.
+ * @arg: Attach ioctl data structure.
*
* Creates a context and attaches LUN to it. A LUN can only be attached
* one time to a context (subsequent attaches for the same context/LUN pair
@@ -1314,9 +1312,9 @@ retry:
*
* Return: 0 on success, -errno on failure
*/
-static int cxlflash_disk_attach(struct scsi_device *sdev,
- struct dk_cxlflash_attach *attach)
+static int cxlflash_disk_attach(struct scsi_device *sdev, void *arg)
{
+ struct dk_cxlflash_attach *attach = arg;
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
@@ -1621,7 +1619,7 @@ err1:
/**
* cxlflash_afu_recover() - initiates AFU recovery
* @sdev: SCSI device associated with LUN.
- * @recover: Recover ioctl data structure.
+ * @arg: Recover ioctl data structure.
*
* Only a single recovery is allowed at a time to avoid exhausting CXL
* resources (leading to recovery failure) in the event that we're up
@@ -1648,9 +1646,9 @@ err1:
*
* Return: 0 on success, -errno on failure
*/
-static int cxlflash_afu_recover(struct scsi_device *sdev,
- struct dk_cxlflash_recover_afu *recover)
+static int cxlflash_afu_recover(struct scsi_device *sdev, void *arg)
{
+ struct dk_cxlflash_recover_afu *recover = arg;
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
@@ -1829,13 +1827,13 @@ out:
/**
* cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
* @sdev: SCSI device associated with LUN.
- * @verify: Verify ioctl data structure.
+ * @arg: Verify ioctl data structure.
*
* Return: 0 on success, -errno on failure
*/
-static int cxlflash_disk_verify(struct scsi_device *sdev,
- struct dk_cxlflash_verify *verify)
+static int cxlflash_disk_verify(struct scsi_device *sdev, void *arg)
{
+ struct dk_cxlflash_verify *verify = arg;
int rc = 0;
struct ctx_info *ctxi = NULL;
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
@@ -2111,16 +2109,16 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
size_t size;
sioctl ioctl;
} ioctl_tbl[] = { /* NOTE: order matters here */
- {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
+ {sizeof(struct dk_cxlflash_attach), cxlflash_disk_attach},
{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
- {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
- {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
- {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
- {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
- {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
+ {sizeof(struct dk_cxlflash_release), cxlflash_disk_release},
+ {sizeof(struct dk_cxlflash_detach), cxlflash_disk_detach},
+ {sizeof(struct dk_cxlflash_verify), cxlflash_disk_verify},
+ {sizeof(struct dk_cxlflash_recover_afu), cxlflash_afu_recover},
+ {sizeof(struct dk_cxlflash_manage_lun), cxlflash_manage_lun},
{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
- {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
- {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
+ {sizeof(struct dk_cxlflash_resize), cxlflash_vlun_resize},
+ {sizeof(struct dk_cxlflash_clone), cxlflash_disk_clone},
};
/* Hold read semaphore so we can drain if needed */
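
The cxlflash changes in this and the neighboring files all serve one cleanup: the ioctl handlers used to have distinct prototypes and were cast to the common hioctl/sioctl type when placed in the table, and calling a function through a mismatched pointer type is undefined behaviour (and, in recent kernels, a -Wcast-function-type-strict warning and a kernel CFI violation, which is presumably the motivation here). Giving every handler a uniform void *arg parameter lets the tables drop the casts; each handler recovers its real type on entry. A condensed sketch with hypothetical names:

    #include <linux/types.h>
    #include <scsi/scsi_device.h>

    typedef int (*example_ioctl_fn)(struct scsi_device *sdev, void *arg);

    struct dk_example_attach {
    	u64 flags;
    };

    static int example_attach(struct scsi_device *sdev, void *arg)
    {
    	struct dk_example_attach *attach = arg;	/* recover the real type */

    	return attach->flags ? -EINVAL : 0;
    }

    static const struct {
    	size_t size;
    	example_ioctl_fn fn;
    } example_tbl[] = {
    	{ sizeof(struct dk_example_attach), example_attach },	/* no cast */
    };
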
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 0e3b45964066..fe8c975d13d7 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -114,18 +114,16 @@ struct cxlflash_global {
struct page *err_page; /* One page of all 0xF for error notification */
};
-int cxlflash_vlun_resize(struct scsi_device *sdev,
- struct dk_cxlflash_resize *resize);
+int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize);
int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi,
struct dk_cxlflash_resize *resize);
int cxlflash_disk_release(struct scsi_device *sdev,
- struct dk_cxlflash_release *release);
+ void *release);
int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi,
struct dk_cxlflash_release *release);
-int cxlflash_disk_clone(struct scsi_device *sdev,
- struct dk_cxlflash_clone *clone);
+int cxlflash_disk_clone(struct scsi_device *sdev, void *arg);
int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg);
@@ -145,8 +143,7 @@ void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte);
void cxlflash_ba_terminate(struct ba_lun *ba_lun);
-int cxlflash_manage_lun(struct scsi_device *sdev,
- struct dk_cxlflash_manage_lun *manage);
+int cxlflash_manage_lun(struct scsi_device *sdev, void *manage);
int check_state(struct cxlflash_cfg *cfg);
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index cbd5a648a131..35326e311991 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -819,8 +819,7 @@ out:
return rc;
}
-int cxlflash_vlun_resize(struct scsi_device *sdev,
- struct dk_cxlflash_resize *resize)
+int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize)
{
return _cxlflash_vlun_resize(sdev, NULL, resize);
}
@@ -1178,7 +1177,7 @@ err:
/**
* cxlflash_disk_clone() - clone a context by making snapshot of another
* @sdev: SCSI device associated with LUN owning virtual LUN.
- * @clone: Clone ioctl data structure.
+ * @arg: Clone ioctl data structure.
*
* This routine effectively performs cxlflash_disk_open operation for each
* in-use virtual resource in the source context. Note that the destination
@@ -1187,9 +1186,9 @@ err:
*
* Return: 0 on success, -errno on failure
*/
-int cxlflash_disk_clone(struct scsi_device *sdev,
- struct dk_cxlflash_clone *clone)
+int cxlflash_disk_clone(struct scsi_device *sdev, void *arg)
{
+ struct dk_cxlflash_clone *clone = arg;
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 1e4550156b73..d223f482488f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -643,7 +643,8 @@ extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern void hisi_sas_remove(struct platform_device *pdev);
-extern int hisi_sas_slave_configure(struct scsi_device *sdev);
+int hisi_sas_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 35f8e00850d6..ec1a3e7ee94d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -868,10 +868,11 @@ err_out:
return rc;
}
-int hisi_sas_slave_configure(struct scsi_device *sdev)
+int hisi_sas_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
- int ret = sas_slave_configure(sdev);
+ int ret = sas_device_configure(sdev, lim);
if (ret)
return ret;
@@ -880,7 +881,7 @@ int hisi_sas_slave_configure(struct scsi_device *sdev)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
+EXPORT_SYMBOL_GPL(hisi_sas_device_configure);
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
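
The hisi_sas conversion shows the new midlayer contract: a .slave_configure callback used to poke the request queue directly (blk_queue_max_hw_sectors() and friends), whereas .device_configure fills in a struct queue_limits that the midlayer commits once the callback returns. A sketch of a converted callback, borrowing the tape-device limit from the hptiop hunk later in this diff; names are illustrative:

    #include <scsi/libsas.h>
    #include <scsi/scsi_device.h>

    static int example_device_configure(struct scsi_device *sdev,
    					struct queue_limits *lim)
    {
    	int ret = sas_device_configure(sdev, lim);	/* library defaults first */

    	if (ret)
    		return ret;

    	/* Stage the limit; the midlayer applies it after we return. */
    	if (sdev->type == TYPE_TAPE)
    		lim->max_hw_sectors = 8192;

    	return 0;
    }
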
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3c555579f9a1..71b5008c3552 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1735,28 +1735,12 @@ static struct attribute *host_v1_hw_attrs[] = {
ATTRIBUTE_GROUPS(host_v1_hw);
static const struct scsi_host_template sht_v1_hw = {
- .name = DRV_NAME,
- .proc_name = DRV_NAME,
- .module = THIS_MODULE,
- .queuecommand = sas_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
- .target_alloc = sas_target_alloc,
- .slave_configure = hisi_sas_slave_configure,
+ LIBSAS_SHT_BASE_NO_SLAVE_INIT
+ .device_configure = hisi_sas_device_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
- .this_id = -1,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = hisi_sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sas_ioctl,
-#endif
.shost_groups = host_v1_hw_groups,
.host_reset = hisi_sas_host_reset,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 73b378837da7..342d75f12051 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3544,6 +3544,11 @@ static struct attribute *host_v2_hw_attrs[] = {
ATTRIBUTE_GROUPS(host_v2_hw);
+static const struct attribute_group *sdev_groups_v2_hw[] = {
+ &sas_ata_sdev_attr_group,
+ NULL
+};
+
static void map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
@@ -3562,29 +3567,14 @@ static void map_queues_v2_hw(struct Scsi_Host *shost)
}
static const struct scsi_host_template sht_v2_hw = {
- .name = DRV_NAME,
- .proc_name = DRV_NAME,
- .module = THIS_MODULE,
- .queuecommand = sas_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
- .target_alloc = sas_target_alloc,
- .slave_configure = hisi_sas_slave_configure,
+ LIBSAS_SHT_BASE_NO_SLAVE_INIT
+ .device_configure = hisi_sas_device_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
- .this_id = -1,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = hisi_sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sas_ioctl,
-#endif
.shost_groups = host_v2_hw_groups,
+ .sdev_groups = sdev_groups_v2_hw,
.host_reset = hisi_sas_host_reset,
.map_queues = map_queues_v2_hw,
.host_tagset = 1,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 34f96cc35342..feda9b54b443 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2902,11 +2902,12 @@ static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev,
}
static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw);
-static int slave_configure_v3_hw(struct scsi_device *sdev)
+static int device_configure_v3_hw(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
struct hisi_hba *hisi_hba = shost_priv(shost);
- int ret = hisi_sas_slave_configure(sdev);
+ int ret = hisi_sas_device_configure(sdev, lim);
struct device *dev = hisi_hba->dev;
if (ret)
@@ -2937,6 +2938,11 @@ static struct attribute *host_v3_hw_attrs[] = {
ATTRIBUTE_GROUPS(host_v3_hw);
+static const struct attribute_group *sdev_groups_v3_hw[] = {
+ &sas_ata_sdev_attr_group,
+ NULL
+};
+
#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
struct hisi_sas_debugfs_reg_lu {
@@ -3323,31 +3329,16 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
}
static const struct scsi_host_template sht_v3_hw = {
- .name = DRV_NAME,
- .proc_name = DRV_NAME,
- .module = THIS_MODULE,
- .queuecommand = sas_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
- .target_alloc = sas_target_alloc,
- .slave_configure = slave_configure_v3_hw,
+ LIBSAS_SHT_BASE_NO_SLAVE_INIT
+ .device_configure = device_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.map_queues = hisi_sas_map_queues,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
- .this_id = -1,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = hisi_sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sas_ioctl,
-#endif
.shost_groups = host_v3_hw_groups,
+ .sdev_groups = sdev_groups_v3_hw,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 2d92549e5243..7f987335b44c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -479,6 +479,12 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
else
shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
+	/* 4-byte (dword) is a common minimum for HBAs. */
+ if (sht->dma_alignment)
+ shost->dma_alignment = sht->dma_alignment;
+ else
+ shost->dma_alignment = 3;
+
/*
* assume a 4GB boundary, if not set
*/
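
Note that shost->dma_alignment is a mask, not a byte count: the default of 3 forces 4-byte alignment of data buffer addresses and lengths, and a host that needs stricter alignment now declares it in its template instead of calling blk_queue_dma_alignment() per device (iscsi_tcp below goes the other way and relaxes it to 0). An illustrative template:

    #include <scsi/scsi_host.h>

    /* Hypothetical host requiring 512-byte-aligned buffers. */
    static const struct scsi_host_template example_sht = {
    	.name		= "example",
    	.dma_alignment	= 511,	/* mask: (addr | len) & 511 must be 0 */
    };
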
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index af18d20f3079..49c57a9c110b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
struct Scsi_Host *sh;
- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *));
if (sh == NULL) {
dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index f5334ccbf2ca..e889f268601b 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1151,11 +1151,11 @@ static struct attribute *hptiop_host_attrs[] = {
ATTRIBUTE_GROUPS(hptiop_host);
-static int hptiop_slave_config(struct scsi_device *sdev)
+static int hptiop_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (sdev->type == TYPE_TAPE)
- blk_queue_max_hw_sectors(sdev->request_queue, 8192);
-
+ lim->max_hw_sectors = 8192;
return 0;
}
@@ -1168,7 +1168,7 @@ static const struct scsi_host_template driver_template = {
.emulated = 0,
.proc_name = driver_name,
.shost_groups = hptiop_host_groups,
- .slave_configure = hptiop_slave_config,
+ .device_configure = hptiop_device_configure,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
.cmd_size = sizeof(struct hpt_cmd_priv),
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 05b126bfd18b..a3d1013c8307 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -5541,8 +5541,6 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
rport->supported_classes |= FC_COS_CLASS2;
if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
rport->supported_classes |= FC_COS_CLASS3;
- if (rport->rqst_q)
- blk_queue_max_segments(rport->rqst_q, 1);
} else
tgt_dbg(tgt, "rport add failed\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -6391,8 +6389,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
ibmvfc_init_sub_crqs(vhost);
- if (shost_to_fc_host(shost)->rqst_q)
- blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost);
spin_lock(&ibmvfc_driver_lock);
list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -6547,6 +6543,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
.get_starget_port_id = ibmvfc_get_starget_port_id,
.show_starget_port_id = 1,
+ .max_bsg_segments = 1,
.bsg_request = ibmvfc_bsg_request,
.bsg_timeout = ibmvfc_bsg_timeout,
};
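
Same idea as the queue_limits conversion: rather than calling blk_queue_max_segments() on the bsg request queues after they exist, the limit is declared in the fc_function_template so the FC transport can apply it when it sets up each bsg queue. The consuming side, sketched with hypothetical handlers:

    static struct fc_function_template example_transport_functions = {
    	.max_bsg_segments = 1,	/* applied at bsg queue allocation */
    	.bsg_request	  = example_bsg_request,	/* hypothetical */
    	.bsg_timeout	  = example_bsg_timeout,	/* hypothetical */
    };
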
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 180a5ddedb2c..21339da505f1 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1100,16 +1100,6 @@ static int device_check(imm_struct *dev, bool autodetect)
return -ENODEV;
}
-/*
- * imm cannot deal with highmem, so this causes all IO pages for this host
- * to reside in low memory (hence mapped)
- */
-static int imm_adjust_queue(struct scsi_device *device)
-{
- blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
- return 0;
-}
-
static const struct scsi_host_template imm_template = {
.module = THIS_MODULE,
.proc_name = "imm",
@@ -1123,7 +1113,6 @@ static const struct scsi_host_template imm_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.can_queue = 1,
- .slave_alloc = imm_adjust_queue,
.cmd_size = sizeof(struct scsi_pointer),
};
@@ -1235,6 +1224,7 @@ static int __imm_attach(struct parport *pb)
host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
if (!host)
goto out1;
+ host->no_highmem = true;
host->io_port = pb->base;
host->n_io_port = ports;
host->dma_channel = -1;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3819f7c42788..388c8a10295a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4769,15 +4769,17 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
}
/**
- * ipr_slave_configure - Configure a SCSI device
+ * ipr_device_configure - Configure a SCSI device
* @sdev: scsi device struct
+ * @lim: queue limits
*
* This function configures the specified scsi device.
*
* Return value:
* 0 on success
**/
-static int ipr_slave_configure(struct scsi_device *sdev)
+static int ipr_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -4798,7 +4800,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->no_report_opcodes = 1;
blk_queue_rq_timeout(sdev->request_queue,
IPR_VSET_RW_TIMEOUT);
- blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
+ lim->max_hw_sectors = IPR_VSET_MAX_SECTORS;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -6397,7 +6399,7 @@ static const struct scsi_host_template driver_template = {
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
.slave_alloc = ipr_slave_alloc,
- .slave_configure = ipr_slave_configure,
+ .device_configure = ipr_device_configure,
.slave_destroy = ipr_slave_destroy,
.scan_finished = ipr_scan_finished,
.target_destroy = ipr_target_destroy,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index c582a3932cea..de2aefcf2089 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -149,33 +149,20 @@ static struct attribute *isci_host_attrs[] = {
ATTRIBUTE_GROUPS(isci_host);
-static const struct scsi_host_template isci_sht = {
+static const struct attribute_group *isci_sdev_groups[] = {
+ &sas_ata_sdev_attr_group,
+ NULL
+};
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .proc_name = DRV_NAME,
- .queuecommand = sas_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
- .target_alloc = sas_target_alloc,
- .slave_configure = sas_slave_configure,
+static const struct scsi_host_template isci_sht = {
+ LIBSAS_SHT_BASE
.scan_finished = isci_host_scan_finished,
.scan_start = isci_host_start,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
.can_queue = ISCI_CAN_QUEUE_VAL,
- .this_id = -1,
.sg_tablesize = SG_ALL,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .eh_abort_handler = sas_eh_abort_handler,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
- .slave_alloc = sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sas_ioctl,
-#endif
+ .eh_abort_handler = sas_eh_abort_handler,
.shost_groups = isci_host_groups,
+ .sdev_groups = isci_sdev_groups,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8e14cea15f98..60688f18fac6 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -943,6 +943,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
shost->max_id = 0;
shost->max_channel = 0;
shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+ shost->dma_alignment = 0;
rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
if (rc < 0)
@@ -1065,7 +1066,6 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
if (conn->datadgst_en)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
sdev->request_queue);
- blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 12e2653846e3..4c69fc63c119 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -964,3 +964,87 @@ int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
force_phy_id, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);
+
+static ssize_t sas_ncq_prio_supported_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ bool supported;
+ int rc;
+
+ rc = ata_ncq_prio_supported(ddev->sata_dev.ap, sdev, &supported);
+ if (rc)
+ return rc;
+
+ return sysfs_emit(buf, "%d\n", supported);
+}
+
+static struct device_attribute dev_attr_sas_ncq_prio_supported =
+ __ATTR(ncq_prio_supported, S_IRUGO, sas_ncq_prio_supported_show, NULL);
+
+static ssize_t sas_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ bool enabled;
+ int rc;
+
+ rc = ata_ncq_prio_enabled(ddev->sata_dev.ap, sdev, &enabled);
+ if (rc)
+ return rc;
+
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+
+static ssize_t sas_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ bool enable;
+ int rc;
+
+ rc = kstrtobool(buf, &enable);
+ if (rc)
+ return rc;
+
+ rc = ata_ncq_prio_enable(ddev->sata_dev.ap, sdev, enable);
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+static struct device_attribute dev_attr_sas_ncq_prio_enable =
+ __ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+ sas_ncq_prio_enable_show, sas_ncq_prio_enable_store);
+
+static struct attribute *sas_ata_sdev_attrs[] = {
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
+ NULL
+};
+
+static umode_t sas_ata_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+
+ if (!dev_is_sata(ddev))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group sas_ata_sdev_attr_group = {
+ .attrs = sas_ata_sdev_attrs,
+ .is_visible = sas_ata_attr_is_visible,
+};
+EXPORT_SYMBOL_GPL(sas_ata_sdev_attr_group);
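
sas_ata_sdev_attr_group is exported precisely so that every libsas LLDD can expose identical ncq_prio_supported/ncq_prio_enable files, with the is_visible hook hiding them on non-SATA domain devices. Consumption is two declarations per driver, condensed here from the aic94xx/isci/hisi_sas hunks; names are illustrative:

    #include <scsi/libsas.h>
    #include <scsi/sas_ata.h>

    #define DRV_NAME "example"	/* LIBSAS_SHT_BASE expects DRV_NAME */

    static const struct attribute_group *example_sdev_groups[] = {
    	&sas_ata_sdev_attr_group,
    	NULL
    };

    static const struct scsi_host_template example_sht = {
    	LIBSAS_SHT_BASE
    	.sdev_groups = example_sdev_groups,
    };
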
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f6e6db8b8aba..4e6bb3d0f163 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -26,6 +26,28 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
u8 *sas_addr, int include);
static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
+static void sas_port_add_ex_phy(struct sas_port *port, struct ex_phy *ex_phy)
+{
+ sas_port_add_phy(port, ex_phy->phy);
+ ex_phy->port = port;
+ ex_phy->phy_state = PHY_DEVICE_DISCOVERED;
+}
+
+static void sas_ex_add_parent_port(struct domain_device *dev, int phy_id)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
+
+ if (!ex->parent_port) {
+ ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id);
+ /* FIXME: error handling */
+ BUG_ON(!ex->parent_port);
+ BUG_ON(sas_port_add(ex->parent_port));
+ sas_port_mark_backlink(ex->parent_port);
+ }
+ sas_port_add_ex_phy(ex->parent_port, ex_phy);
+}
+
/* ---------- SMP task management ---------- */
/* Give it some long enough timeout. In seconds. */
@@ -239,8 +261,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
/* help some expanders that fail to zero sas_address in the 'no
* device' case
*/
- if (phy->attached_dev_type == SAS_PHY_UNUSED ||
- phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
+ if (phy->attached_dev_type == SAS_PHY_UNUSED)
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
else
memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
@@ -857,9 +878,7 @@ static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr,
SAS_ADDR_SIZE) && ephy->port) {
- sas_port_add_phy(ephy->port, phy->phy);
- phy->port = ephy->port;
- phy->phy_state = PHY_DEVICE_DISCOVERED;
+ sas_port_add_ex_phy(ephy->port, phy);
return true;
}
}
@@ -963,11 +982,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
/* Parent and domain coherency */
if (!dev->parent && sas_phy_match_port_addr(dev->port, ex_phy)) {
- sas_add_parent_port(dev, phy_id);
+ sas_ex_add_parent_port(dev, phy_id);
return 0;
}
if (dev->parent && sas_phy_match_dev_addr(dev->parent, ex_phy)) {
- sas_add_parent_port(dev, phy_id);
+ sas_ex_add_parent_port(dev, phy_id);
if (ex_phy->routing_attr == TABLE_ROUTING)
sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1);
return 0;
@@ -1849,9 +1868,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
if (phy->port) {
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
- if (phy->port->num_phys == 0)
+ if (phy->port->num_phys == 0) {
list_add_tail(&phy->port->del_list,
&parent->port->sas_port_del_list);
+ if (ex_dev->parent_port == phy->port)
+ ex_dev->parent_port = NULL;
+ }
phy->port = NULL;
}
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 3804aef165ad..85948963fb97 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -189,21 +189,6 @@ static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_devic
}
}
-static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
-{
- struct expander_device *ex = &dev->ex_dev;
- struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
-
- if (!ex->parent_port) {
- ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id);
- /* FIXME: error handling */
- BUG_ON(!ex->parent_port);
- BUG_ON(sas_port_add(ex->parent_port));
- sas_port_mark_backlink(ex->parent_port);
- }
- sas_port_add_phy(ex->parent_port, ex_phy->phy);
-}
-
static inline struct domain_device *sas_alloc_device(void)
{
struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9047cfcd1072..da11d32840e2 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -804,14 +804,15 @@ EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
-int sas_slave_configure(struct scsi_device *scsi_dev)
+int sas_device_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
- ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
+ ata_sas_device_configure(scsi_dev, lim, dev->sata_dev.ap);
return 0;
}
@@ -829,7 +830,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
return 0;
}
-EXPORT_SYMBOL_GPL(sas_slave_configure);
+EXPORT_SYMBOL_GPL(sas_device_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 98ca7df003ef..7c147d6ea8a8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -393,6 +393,37 @@ enum hba_state {
LPFC_HBA_ERROR = -1
};
+enum lpfc_hba_flag { /* hba generic flags */
+ HBA_ERATT_HANDLED = 0, /* This flag is set when eratt handled */
+ DEFER_ERATT = 1, /* Deferred error attn in progress */
+ HBA_FCOE_MODE = 2, /* HBA function in FCoE Mode */
+ HBA_SP_QUEUE_EVT = 3, /* Slow-path qevt posted to worker thread*/
+ HBA_POST_RECEIVE_BUFFER = 4, /* Rcv buffers need to be posted */
+ HBA_PERSISTENT_TOPO = 5, /* Persistent topology support in hba */
+ ELS_XRI_ABORT_EVENT = 6, /* ELS_XRI abort event was queued */
+ ASYNC_EVENT = 7,
+ LINK_DISABLED = 8, /* Link disabled by user */
+ FCF_TS_INPROG = 9, /* FCF table scan in progress */
+ FCF_RR_INPROG = 10, /* FCF roundrobin flogi in progress */
+ HBA_FIP_SUPPORT = 11, /* FIP support in HBA */
+ HBA_DEVLOSS_TMO = 13, /* HBA in devloss timeout */
+ HBA_RRQ_ACTIVE = 14, /* process the rrq active list */
+ HBA_IOQ_FLUSH = 15, /* I/O queues being flushed */
+ HBA_RECOVERABLE_UE = 17, /* FW supports recoverable UE */
+ HBA_FORCED_LINK_SPEED = 18, /*
+ * Firmware supports Forced Link
+ * Speed capability
+ */
+ HBA_FLOGI_ISSUED = 20, /* FLOGI was issued */
+ HBA_DEFER_FLOGI = 23, /* Defer FLOGI till read_sparm cmpl */
+ HBA_SETUP = 24, /* HBA setup completed */
+ HBA_NEEDS_CFG_PORT = 25, /* SLI3: CONFIG_PORT mbox needed */
+ HBA_HBEAT_INP = 26, /* mbox HBEAT is in progress */
+ HBA_HBEAT_TMO = 27, /* HBEAT initiated after timeout */
+ HBA_FLOGI_OUTSTANDING = 28, /* FLOGI is outstanding */
+ HBA_RHBA_CMPL = 29, /* RHBA FDMI cmd is successful */
+};
+
struct lpfc_trunk_link_state {
enum hba_state state;
uint8_t fault;
@@ -1007,35 +1038,7 @@ struct lpfc_hba {
#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
#define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */
- uint32_t hba_flag; /* hba generic flags */
-#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
-#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
-#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
-#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
-#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
-#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
-#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */
-#define ASYNC_EVENT 0x80
-#define LINK_DISABLED 0x100 /* Link disabled by user */
-#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
-#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */
-#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
-#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
-#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
-#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
-#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
-#define HBA_FORCED_LINK_SPEED 0x40000 /*
- * Firmware supports Forced Link Speed
- * capability
- */
-#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
-#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
-#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
-#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
-#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
-#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
-#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
-#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */
+ unsigned long hba_flag; /* hba generic flags */
struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
@@ -1284,6 +1287,7 @@ struct lpfc_hba {
uint32_t total_scsi_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
+ spinlock_t rrq_list_lock; /* lock for active_rrq_list */
struct list_head active_rrq_list;
spinlock_t hbalock;
struct work_struct unblock_request_work; /* SCSI layer unblock IOs */
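
The lpfc.h hunk above is the keystone of the hba_flag rework: the flag word becomes an unsigned long whose values are bit positions, so every access goes through the atomic bitops (set_bit/clear_bit/test_bit) and no longer needs hbalock for read-modify-write safety. The follow-on hunks in this diff are the mechanical conversions, and test_and_set_bit() additionally closes check-then-set races such as the HBA_DEVLOSS_TMO path. The shape of the conversion, as a fragment:

    #include <linux/bitops.h>

    static void example_mark_devloss(struct lpfc_hba *phba)
    {
    	/* Old: phba->hba_flag |= HBA_DEVLOSS_TMO; under hbalock.
    	 * New: one atomic op that also reports the previous state,
    	 * making the "already in devloss?" check race-free. */
    	if (test_and_set_bit(HBA_DEVLOSS_TMO, &phba->hba_flag))
    		return;	/* another path already started devloss handling */
    }
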
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3c534b3cfe91..a46c73e8d7c4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -322,7 +322,7 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- if (phba->hba_flag & HBA_FIP_SUPPORT)
+ if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag))
return scnprintf(buf, PAGE_SIZE, "1\n");
else
return scnprintf(buf, PAGE_SIZE, "0\n");
@@ -1049,7 +1049,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
- if (phba->hba_flag & LINK_DISABLED)
+ if (test_bit(LINK_DISABLED, &phba->hba_flag))
len += scnprintf(buf + len, PAGE_SIZE-len,
"Link Down - User disabled\n");
else
@@ -1292,7 +1292,7 @@ lpfc_issue_lip(struct Scsi_Host *shost)
* it doesn't make any sense to allow issue_lip
*/
if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
- (phba->hba_flag & LINK_DISABLED) ||
+ test_bit(LINK_DISABLED, &phba->hba_flag) ||
(phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
return -EPERM;
@@ -3635,7 +3635,8 @@ lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return scnprintf(buf, PAGE_SIZE, "%d\n",
- (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+ test_bit(HBA_PERSISTENT_TOPO,
+ &phba->hba_flag) ? 1 : 0);
}
static DEVICE_ATTR(pt, 0444,
lpfc_pt_show, NULL);
@@ -4205,8 +4206,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
&phba->sli4_hba.sli_intf);
if_type = bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf);
- if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
- (!phba->sli4_hba.pc_sli4_params.pls &&
+ if ((test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag) ||
+ (!phba->sli4_hba.pc_sli4_params.pls &&
(sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
val == 4) {
@@ -4309,7 +4310,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
- phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ test_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag))
return -EPERM;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
@@ -6497,7 +6498,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
+ if ((lpfc_is_link_up(phba)) &&
+ !test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
switch(phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
@@ -6533,7 +6535,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
- } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
+ } else if (lpfc_is_link_up(phba) &&
+ test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_1GBPS:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
@@ -6718,7 +6721,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->invalid_crc_count -= lso->invalid_crc_count;
hs->error_frames -= lso->error_frames;
- if (phba->hba_flag & HBA_FCOE_MODE) {
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
hs->lip_count = -1;
hs->nos_count = (phba->link_events >> 1);
hs->nos_count -= lso->link_events;
@@ -6816,7 +6819,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
lso->error_frames = pmb->un.varRdLnk.crcCnt;
- if (phba->hba_flag & HBA_FCOE_MODE)
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag))
lso->link_events = (phba->link_events >> 1);
else
lso->link_events = (phba->fc_eventTag >> 1);
@@ -7161,11 +7164,11 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
case PCI_DEVICE_ID_ZEPHYR_DCSP:
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TOMCAT:
- phba->hba_flag |= HBA_FCOE_MODE;
+ set_bit(HBA_FCOE_MODE, &phba->hba_flag);
break;
default:
/* for others, clear the flag */
- phba->hba_flag &= ~HBA_FCOE_MODE;
+ clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
}
}
@@ -7236,7 +7239,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_get_hba_function_mode(phba);
/* BlockGuard allowed for FC only. */
- if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
+ if (phba->cfg_enable_bg && test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0581 BlockGuard feature not supported\n");
/* If set, clear the BlockGuard support param */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 529df1768fa8..4156419c52c7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5002,7 +5002,8 @@ lpfc_forced_link_speed(struct bsg_job *job)
goto job_error;
}
- forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED,
+ &phba->hba_flag)
? LPFC_FORCED_LINK_SPEED_SUPPORTED
: LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
job_error:
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 8cc08e58dc05..376d0f958b72 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -291,9 +291,9 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
if (ulp_status) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "6438 Unsol CT: status:x%x/x%x did : x%x\n",
- ulp_status, ulp_word4, did);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "6438 Unsol CT: status:x%x/x%x did : x%x\n",
+ ulp_status, ulp_word4, did);
return;
}
@@ -303,17 +303,17 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "6439 Unsol CT: NDLP Not Found for DID : x%x",
- did);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "6439 Unsol CT: NDLP Not Found for DID : x%x",
+ did);
return;
}
ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "6442 MI Cmd : x%x Not Supported\n", mi_cmd);
lpfc_ct_reject_event(ndlp, ct_req,
bf_get(wqe_ctxt_tag,
&ctiocbq->wqe.xmit_els_rsp.wqe_com),
@@ -2173,7 +2173,7 @@ lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
struct lpfc_nodelist *ndlp;
int i;
- phba->hba_flag |= HBA_RHBA_CMPL;
+ set_bit(HBA_RHBA_CMPL, &phba->hba_flag);
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -2368,7 +2368,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* for the physical port completes successfully.
* We may have to defer the RPRT accordingly.
*/
- if (phba->hba_flag & HBA_RHBA_CMPL) {
+ if (test_bit(HBA_RHBA_CMPL, &phba->hba_flag)) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
} else {
lpfc_printf_vlog(vport, KERN_INFO,
@@ -2785,7 +2785,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
u32 tcfg;
u8 i, cnt;
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
cnt = 0;
if (phba->sli_rev == LPFC_SLI_REV4) {
tcfg = phba->sli4_hba.conf_trunk;
@@ -2859,7 +2859,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
struct lpfc_hba *phba = vport->phba;
u32 speeds = 0;
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
speeds = HBA_PORTSPEED_1GFC;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f7c28dc73bf6..c32bc773ab29 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -189,11 +189,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
* If this command is for fabric controller and HBA running
* in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
*/
- if ((did == Fabric_DID) &&
- (phba->hba_flag & HBA_FIP_SUPPORT) &&
- ((elscmd == ELS_CMD_FLOGI) ||
- (elscmd == ELS_CMD_FDISC) ||
- (elscmd == ELS_CMD_LOGO)))
+ if (did == Fabric_DID &&
+ test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
+ (elscmd == ELS_CMD_FLOGI ||
+ elscmd == ELS_CMD_FDISC ||
+ elscmd == ELS_CMD_LOGO))
switch (elscmd) {
case ELS_CMD_FLOGI:
elsiocb->cmd_flag |=
@@ -965,7 +965,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* In case of FIP mode, perform roundrobin FCF failover
* due to new FCF discovery
*/
- if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+ if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
(phba->fcf.fcf_flag & FCF_DISCOVERY)) {
if (phba->link_state < LPFC_LINK_UP)
goto stop_rr_fcf_flogi;
@@ -999,7 +999,7 @@ stop_rr_fcf_flogi:
IOERR_LOOP_OPEN_FAILURE)))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2858 FLOGI failure Status:x%x/x%x TMO"
- ":x%x Data x%x x%x\n",
+ ":x%x Data x%lx x%x\n",
ulp_status, ulp_word4, tmo,
phba->hba_flag, phba->fcf.fcf_flag);
@@ -1119,7 +1119,7 @@ stop_rr_fcf_flogi:
if (sp->cmn.fPort)
rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
ulp_word4);
- else if (!(phba->hba_flag & HBA_FCOE_MODE))
+ else if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
else {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -1149,14 +1149,15 @@ stop_rr_fcf_flogi:
lpfc_nlp_put(ndlp);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
- phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
+ clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag);
phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
if (!rc) {
/* Mark the FCF discovery process done */
- if (phba->hba_flag & HBA_FIP_SUPPORT)
+ if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag))
lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
LOG_ELS,
"2769 FLOGI to FCF (x%x) "
@@ -1164,8 +1165,9 @@ stop_rr_fcf_flogi:
phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
- phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
+ clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag);
phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
@@ -1202,7 +1204,7 @@ flogifail:
}
out:
if (!flogi_in_retry)
- phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
+ clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -1372,11 +1374,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
/* Avoid race with FLOGI completion and hba_flags. */
- phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
+ set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
+ set_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
- phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
+ clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
+ clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
return 1;
@@ -1413,7 +1417,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%x\n",
+ " ox_id: x%x, hba_flag x%lx\n",
phba->defer_flogi_acc_rx_id,
phba->defer_flogi_acc_ox_id, phba->hba_flag);
@@ -7415,7 +7419,8 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
goto error;
}
- if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
+ if (phba->sli_rev < LPFC_SLI_REV4 ||
+ test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_expl = LSEXP_REQ_UNSUPPORTED;
goto error;
@@ -7738,7 +7743,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
if (phba->sli_rev < LPFC_SLI_REV4 ||
- phba->hba_flag & HBA_FCOE_MODE ||
+ test_bit(HBA_FCOE_MODE, &phba->hba_flag) ||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2)) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
@@ -8443,7 +8448,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
/* Defer ACC response until AFTER we issue a FLOGI */
- if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
+ if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) {
phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
&wqe->xmit_els_rsp.wqe_com);
phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
@@ -8453,7 +8458,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3344 Deferring FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%x\n",
+ " ox_id: x%x, hba_flag x%lx\n",
phba->defer_flogi_acc_rx_id,
phba->defer_flogi_acc_ox_id, phba->hba_flag);
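A small but recurring consequence shows up in the lpfc_els.c logging hunks: once hba_flag is an unsigned long, a %x conversion no longer matches the argument type and the compiler's printf format checking would warn, hence the x%x to x%lx changes. A one-line illustration of the rule (hypothetical log line):

#include <linux/printk.h>

static void demo_log(unsigned long hba_flag)
{
	/* %lx matches unsigned long; plain %x would trigger -Wformat */
	pr_info("hba_flag x%lx\n", hba_flag);
}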
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e42fa9c822b5..153770bdc56a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -487,7 +487,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
recovering = true;
} else {
/* Physical port path. */
- if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
+ if (test_bit(HBA_FLOGI_OUTSTANDING,
+ &phba->hba_flag))
recovering = true;
}
break;
@@ -652,14 +653,15 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
if (!fcf_inuse)
return;
- if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+ if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
+ !lpfc_fcf_inuse(phba)) {
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
- if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ if (test_and_set_bit(HBA_DEVLOSS_TMO,
+ &phba->hba_flag)) {
spin_unlock_irq(&phba->hbalock);
return;
}
- phba->hba_flag |= HBA_DEVLOSS_TMO;
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2847 Last remote node (x%x) using "
"FCF devloss tmo\n", nlp_did);
@@ -671,8 +673,9 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
"in progress\n");
return;
}
- if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
+ if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) &&
+ !test_bit(FCF_RR_INPROG, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2869 Devloss tmo to idle FIP engine, "
"unreg in-use FCF and rescan.\n");
@@ -680,11 +683,10 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
lpfc_unregister_fcf_rescan(phba);
return;
}
- spin_unlock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_TS_INPROG)
+ if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2870 FCF table scan in progress\n");
- if (phba->hba_flag & FCF_RR_INPROG)
+ if (test_bit(FCF_RR_INPROG, &phba->hba_flag))
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2871 FLOGI roundrobin FCF failover "
"in progress\n");
@@ -978,18 +980,15 @@ lpfc_work_done(struct lpfc_hba *phba)
/* Process SLI4 events */
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
- if (phba->hba_flag & HBA_RRQ_ACTIVE)
+ if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag))
lpfc_handle_rrq_active(phba);
- if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+ if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag))
lpfc_sli4_els_xri_abort_event_proc(phba);
- if (phba->hba_flag & ASYNC_EVENT)
+ if (test_bit(ASYNC_EVENT, &phba->hba_flag))
lpfc_sli4_async_event_proc(phba);
- if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
- spin_unlock_irq(&phba->hbalock);
+ if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER,
+ &phba->hba_flag))
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- }
if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
lpfc_sli4_fcf_redisc_event_proc(phba);
}
@@ -1035,11 +1034,11 @@ lpfc_work_done(struct lpfc_hba *phba)
status >>= (4*LPFC_ELS_RING);
if (pring && (status & HA_RXMASK ||
pring->flag & LPFC_DEFERRED_RING_EVENT ||
- phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+ test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) {
if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Preserve legacy behavior. */
- if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
+ if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
/* Driver could have abort request completed in queue
@@ -1420,7 +1419,8 @@ lpfc_linkup(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
/* reinitialize initial HBA flag */
- phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
+ clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
+ clear_bit(HBA_RHBA_CMPL, &phba->hba_flag);
return 0;
}
@@ -1505,7 +1505,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* don't perform discovery for SLI4 loopback diagnostic test */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(phba->hba_flag & HBA_FCOE_MODE) &&
+ !test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
(phba->link_flag & LS_LOOPBACK_MODE))
return;
@@ -1548,7 +1548,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto sparam_out;
}
- phba->hba_flag |= HBA_DEFER_FLOGI;
+ set_bit(HBA_DEFER_FLOGI, &phba->hba_flag);
} else {
lpfc_initial_flogi(vport);
}
@@ -1617,27 +1617,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_unlock_irq(&phba->hbalock);
/* If there is a pending FCoE event, restart FCF table scan. */
- if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
- lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) &&
+ lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
goto fail_out;
/* Mark successful completion of FCF table scan */
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
if (vport->port_state != LPFC_FLOGI) {
- phba->hba_flag |= FCF_RR_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FCF_RR_INPROG, &phba->hba_flag);
lpfc_issue_init_vfi(vport);
- goto out;
}
- spin_unlock_irq(&phba->hbalock);
goto out;
fail_out:
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_RR_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
out:
mempool_free(mboxq, phba->mbox_mem_pool);
}
@@ -1867,32 +1863,31 @@ lpfc_register_fcf(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* If the FCF is not available do nothing. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
return;
}
/* The FCF is already registered, start discovery */
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
if (phba->pport->port_state != LPFC_FLOGI &&
test_bit(FC_FABRIC, &phba->pport->fc_flag)) {
- phba->hba_flag |= FCF_RR_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FCF_RR_INPROG, &phba->hba_flag);
lpfc_initial_flogi(phba->pport);
return;
}
- spin_unlock_irq(&phba->hbalock);
return;
}
spin_unlock_irq(&phba->hbalock);
fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!fcf_mbxq) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
return;
}
@@ -1901,9 +1896,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
mempool_free(fcf_mbxq, phba->mbox_mem_pool);
}
@@ -1956,7 +1950,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
return 0;
- if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) {
*boot_flag = 0;
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
new_fcf_record);
@@ -2151,8 +2145,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2833 Stop FCF discovery process due to link "
"state change (x%x)\n", phba->link_state);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
spin_unlock_irq(&phba->hbalock);
}
@@ -2380,9 +2375,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
int rc;
if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_DEVLOSS_TMO) {
- spin_unlock_irq(&phba->hbalock);
+ if (test_bit(HBA_DEVLOSS_TMO, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2872 Devloss tmo with no eligible "
"FCF, unregister in-use FCF (x%x) "
@@ -2392,8 +2385,9 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
goto stop_flogi_current_fcf;
}
/* Mark the end to FLOGI roundrobin failover */
- phba->hba_flag &= ~FCF_RR_INPROG;
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
/* Allow action to new fcf asynchronous event */
+ spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -2630,9 +2624,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
"2765 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
/* Let next new FCF event trigger fast failover */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return;
}
@@ -2873,10 +2865,10 @@ read_next_fcf:
phba->fcoe_eventtag_at_fcf_scan,
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record));
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_DEVLOSS_TMO) {
- phba->hba_flag &= ~FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ if (test_bit(HBA_DEVLOSS_TMO,
+ &phba->hba_flag)) {
+ clear_bit(FCF_TS_INPROG,
+ &phba->hba_flag);
/* Unregister in-use FCF and rescan */
lpfc_printf_log(phba, KERN_INFO,
LOG_FIP,
@@ -2889,8 +2881,7 @@ read_next_fcf:
/*
* Let next new FCF event trigger fast failover
*/
- phba->hba_flag &= ~FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
return;
}
/*
@@ -2996,8 +2987,8 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (phba->link_state < LPFC_LINK_UP) {
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
- phba->hba_flag &= ~FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
goto out;
}
@@ -3008,7 +2999,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2766 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record. "
- "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+ "hba_flg x%lx fcf_flg x%x\n", phba->hba_flag,
phba->fcf.fcf_flag);
lpfc_unregister_fcf_rescan(phba);
goto out;
@@ -3471,9 +3462,9 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check if sending the FLOGI is being deferred to after we get
* up to date CSPs from MBX_READ_SPARAM.
*/
- if (phba->hba_flag & HBA_DEFER_FLOGI) {
+ if (test_bit(HBA_DEFER_FLOGI, &phba->hba_flag)) {
lpfc_initial_flogi(vport);
- phba->hba_flag &= ~HBA_DEFER_FLOGI;
+ clear_bit(HBA_DEFER_FLOGI, &phba->hba_flag);
}
return;
@@ -3495,7 +3486,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
spin_lock_irqsave(&phba->hbalock, iflags);
phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
case LPFC_LINK_SPEED_1GHZ:
case LPFC_LINK_SPEED_2GHZ:
@@ -3611,7 +3602,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!cfglink_mbox)
goto out;
@@ -3631,7 +3622,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
* is phase 1 implementation that supports FCF index 0 and driver
* defaults.
*/
- if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) {
fcf_record = kzalloc(sizeof(struct fcf_record),
GFP_KERNEL);
if (unlikely(!fcf_record)) {
@@ -3661,12 +3652,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
* The driver is expected to do FIP/FCF. Call the port
* and get the FCF Table.
*/
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (phba->hba_flag & FCF_TS_INPROG) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
return;
- }
/* This is the initial FCF discovery scan */
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fcf.fcf_flag |= FCF_INIT_DISC;
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
@@ -6997,11 +6986,11 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
* registered, do nothing.
*/
spin_lock_irq(&phba->hbalock);
- if (!(phba->hba_flag & HBA_FCOE_MODE) ||
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) ||
!(phba->fcf.fcf_flag & FCF_REGISTERED) ||
- !(phba->hba_flag & HBA_FIP_SUPPORT) ||
+ !test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) ||
(phba->fcf.fcf_flag & FCF_DISCOVERY) ||
- (phba->pport->port_state == LPFC_FLOGI)) {
+ phba->pport->port_state == LPFC_FLOGI) {
spin_unlock_irq(&phba->hbalock);
return;
}
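Beyond plain set/clear conversions, the lpfc_hbadisc.c hunks collapse lock-protected check-then-modify sequences into a single atomic read-modify-write, as with HBA_DEVLOSS_TMO (test_and_set_bit) and HBA_POST_RECEIVE_BUFFER (test_and_clear_bit). A reduced sketch of both idioms, assuming the lock protected nothing but the flag:

#include <linux/bitops.h>

#define DEMO_DEVLOSS_TMO	0
static unsigned long demo_flags;

/* "Set it unless it was already set": the return value is the old bit,
 * so a true result means another path got there first.
 */
static bool demo_mark_devloss_once(void)
{
	return test_and_set_bit(DEMO_DEVLOSS_TMO, &demo_flags);
}

/* "Consume the flag and act once": clears the bit and reports whether
 * it had been set, replacing a lock/test/clear/unlock sequence.
 */
static void demo_handle_one_shot(void)
{
	if (!test_and_clear_bit(DEMO_DEVLOSS_TMO, &demo_flags))
		return;
	/* flag was set: perform the deferred work exactly once here */
}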
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 367e6b066d42..500253007b1d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2146,6 +2146,14 @@ struct sli4_sge { /* SLI-4 */
uint32_t sge_len;
};
+struct sli4_sge_le {
+ __le32 addr_hi;
+ __le32 addr_lo;
+
+ __le32 word2;
+ __le32 sge_len;
+};
+
struct sli4_hybrid_sgl {
struct list_head list_node;
struct sli4_sge *dma_sgl;
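struct sli4_sge_le duplicates the sli4_sge layout with __le32 members, letting sparse verify endianness while the SGE is built with cpu_to_le32() and bf_set_le32() in one pass, instead of the old le32_to_cpu/bf_set/cpu_to_le32 round trip on word2 (visible in the lpfc_scsi.c hunks below). A sketch of the annotation idea on a hypothetical two-word descriptor:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_desc_le {
	__le32 addr_lo;		/* hardware consumes little-endian */
	__le32 len;
};

static void demo_fill(struct demo_desc_le *d, u32 addr, u32 len)
{
	/* a direct u32 assignment here would be flagged by sparse */
	d->addr_lo = cpu_to_le32(addr);
	d->len = cpu_to_le32(len);
}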
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f7a0aa3625f4..e1dfa96c2a55 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -567,7 +567,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* Initialize ERATT handling flag */
- phba->hba_flag &= ~HBA_ERATT_HANDLED;
+ clear_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
/* Enable appropriate host interrupts */
if (lpfc_readl(phba->HCregaddr, &status)) {
@@ -599,13 +599,14 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
+ clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
+ clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
- if (phba->hba_flag & LINK_DISABLED) {
+ if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2598 Adapter Link is disabled.\n");
lpfc_down_link(phba, pmb);
@@ -925,9 +926,7 @@ lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
struct hbq_dmabuf *dmabuf;
struct lpfc_cq_event *cq_event;
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
/* Get the response iocb from the head of work queue */
@@ -1228,18 +1227,15 @@ static void
lpfc_rrq_timeout(struct timer_list *t)
{
struct lpfc_hba *phba;
- unsigned long iflag;
phba = from_timer(phba, t, rrq_tmr);
- spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
- phba->hba_flag |= HBA_RRQ_ACTIVE;
- else
- phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
+ clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
+ return;
+ }
- if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
- lpfc_worker_wake_up(phba);
+ set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
+ lpfc_worker_wake_up(phba);
}
/**
@@ -1261,11 +1257,8 @@ lpfc_rrq_timeout(struct timer_list *t)
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
- unsigned long drvr_flag;
-
- spin_lock_irqsave(&phba->hbalock, drvr_flag);
- phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
- spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
+ clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
/* Check and reset heart-beat timer if necessary */
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -1457,7 +1450,7 @@ lpfc_issue_hb_mbox(struct lpfc_hba *phba)
int retval;
/* Is a Heartbeat mbox already in progress */
- if (phba->hba_flag & HBA_HBEAT_INP)
+ if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
return 0;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1473,7 +1466,7 @@ lpfc_issue_hb_mbox(struct lpfc_hba *phba)
mempool_free(pmboxq, phba->mbox_mem_pool);
return -ENXIO;
}
- phba->hba_flag |= HBA_HBEAT_INP;
+ set_bit(HBA_HBEAT_INP, &phba->hba_flag);
return 0;
}
@@ -1493,7 +1486,7 @@ lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
if (phba->cfg_enable_hba_heartbeat)
return;
- phba->hba_flag |= HBA_HBEAT_TMO;
+ set_bit(HBA_HBEAT_TMO, &phba->hba_flag);
}
/**
@@ -1565,7 +1558,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
jiffies)) {
spin_unlock_irq(&phba->pport->work_port_lock);
- if (phba->hba_flag & HBA_HBEAT_INP)
+ if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
else
tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
@@ -1574,7 +1567,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
spin_unlock_irq(&phba->pport->work_port_lock);
/* Check if a MBX_HEARTBEAT is already in progress */
- if (phba->hba_flag & HBA_HBEAT_INP) {
+ if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) {
/*
* If heart beat timeout called with HBA_HBEAT_INP set
* we need to give the hb mailbox cmd a chance to
@@ -1611,7 +1604,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
}
} else {
/* Check to see if we want to force a MBX_HEARTBEAT */
- if (phba->hba_flag & HBA_HBEAT_TMO) {
+ if (test_bit(HBA_HBEAT_TMO, &phba->hba_flag)) {
retval = lpfc_issue_hb_mbox(phba);
if (retval)
tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
@@ -1699,9 +1692,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
* since we cannot communicate with the pci card anyway.
*/
if (pci_channel_offline(phba->pcidev)) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~DEFER_ERATT;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(DEFER_ERATT, &phba->hba_flag);
return;
}
@@ -1752,9 +1743,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
phba->work_hs = old_host_status & ~HS_FFER1;
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~DEFER_ERATT;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(DEFER_ERATT, &phba->hba_flag);
phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
@@ -1798,9 +1787,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* since we cannot communicate with the pci card anyway.
*/
if (pci_channel_offline(phba->pcidev)) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~DEFER_ERATT;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(DEFER_ERATT, &phba->hba_flag);
return;
}
@@ -1811,7 +1798,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
/* Send an internal error event to mgmt application */
lpfc_board_errevt_to_mgmt(phba);
- if (phba->hba_flag & DEFER_ERATT)
+ if (test_bit(DEFER_ERATT, &phba->hba_flag))
lpfc_handle_deferred_eratt(phba);
if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
@@ -2026,7 +2013,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
/* consider PCI bus read error as pci_channel_offline */
if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
return;
- if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
+ if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) {
lpfc_sli4_offline_eratt(phba);
return;
}
@@ -3319,9 +3306,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->hb_tmofunc);
if (phba->sli_rev == LPFC_SLI_REV4) {
del_timer_sync(&phba->rrq_tmr);
- phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
}
- phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
+ clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
+ clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
@@ -4785,7 +4773,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_id = LPFC_MAX_TARGET;
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
- shost->max_cmd_len = 16;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
+ else
+ shost->max_cmd_len = LPFC_FCP_CDB_LEN;
if (phba->sli_rev == LPFC_SLI_REV4) {
if (!phba->cfg_fcp_mq_threshold ||
@@ -4976,7 +4967,7 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
* Avoid reporting supported link speed for FCoE as it can't be
* controlled via FCoE.
*/
- if (phba->hba_flag & HBA_FCOE_MODE)
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag))
return;
if (phba->lmt & LMT_256Gb)
@@ -5490,7 +5481,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
* For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
* topology info. Note: Optional for non FC-AL ports.
*/
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
goto out_free_pmb;
@@ -6025,7 +6016,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
*/
if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
phba->link_state != LPFC_LINK_DOWN &&
- phba->hba_flag & HBA_SETUP) {
+ test_bit(HBA_SETUP, &phba->hba_flag)) {
mbpi = phba->cmf_last_sync_bw;
phba->cmf_last_sync_bw = 0;
extra = 0;
@@ -6778,11 +6769,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
}
/* If the FCF discovery is in progress, do nothing. */
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_TS_INPROG) {
- spin_unlock_irq(&phba->hbalock);
+ if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
break;
- }
+ spin_lock_irq(&phba->hbalock);
/* If fast FCF failover rescan event is pending, do nothing */
if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
spin_unlock_irq(&phba->hbalock);
@@ -7321,9 +7310,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
unsigned long iflags;
/* First, declare the async event has been handled */
- spin_lock_irqsave(&phba->hbalock, iflags);
- phba->hba_flag &= ~ASYNC_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ clear_bit(ASYNC_EVENT, &phba->hba_flag);
/* Now, handle all the async events */
spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
@@ -8247,7 +8234,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* our max amount and we need to limit lpfc_sg_seg_cnt
* to minimize the risk of running out.
*/
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
sizeof(struct fcp_rsp) + max_buf_size;
/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
@@ -8269,7 +8256,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* the FCP rsp, a SGE for each, and a SGE for up to
* cfg_sg_seg_cnt data segments.
*/
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
@@ -8332,7 +8319,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->lpfc_cmd_rsp_buf_pool =
dma_pool_create("lpfc_cmd_rsp_buf_pool",
&phba->pcidev->dev,
- sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_cmnd32) +
sizeof(struct fcp_rsp),
i, 0);
if (!phba->lpfc_cmd_rsp_buf_pool) {
@@ -9869,41 +9856,38 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
return;
}
/* FW supports persistent topology - override module parameter value */
- phba->hba_flag |= HBA_PERSISTENT_TOPO;
+ set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
/* if ASIC_GEN_NUM >= 0xC) */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_6) ||
(bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_FAMILY_G6)) {
- if (!tf) {
+ if (!tf)
phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
? FLAGS_TOPOLOGY_MODE_LOOP
: FLAGS_TOPOLOGY_MODE_PT_PT);
- } else {
- phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
- }
+ else
+ clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
} else { /* G5 */
- if (tf) {
+ if (tf)
/* If topology failover set - pt is '0' or '1' */
phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
FLAGS_TOPOLOGY_MODE_LOOP_PT);
- } else {
+ else
phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
? FLAGS_TOPOLOGY_MODE_PT_PT
: FLAGS_TOPOLOGY_MODE_LOOP);
- }
}
- if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
+ if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag))
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2020 Using persistent topology value [%s]",
lpfc_topo_to_str[phba->cfg_topology]);
- } else {
+ else
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2021 Invalid topology values from FW "
"Using driver parameter defined value [%s]",
lpfc_topo_to_str[phba->cfg_topology]);
- }
}
/**
@@ -10146,7 +10130,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
forced_link_speed =
bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
if (forced_link_speed) {
- phba->hba_flag |= HBA_FORCED_LINK_SPEED;
+ set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag);
switch (forced_link_speed) {
case LINK_SPEED_1G:
@@ -12241,7 +12225,7 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
if (retval)
return intr_mode;
- phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
+ clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
if (cfg_mode == 2) {
/* Now, try to enable MSI-X interrupt mode */
@@ -14812,6 +14796,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_unset_pci_mem_s4;
}
+ spin_lock_init(&phba->rrq_list_lock);
INIT_LIST_HEAD(&phba->active_rrq_list);
INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
@@ -15528,7 +15513,7 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
if (phba->link_state == LPFC_HBA_ERROR &&
- phba->hba_flag & HBA_IOQ_FLUSH)
+ test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
return PCI_ERS_RESULT_NEED_RESET;
switch (phba->pci_dev_grp) {
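The lpfc_rrq_timeout rewrite above is a compact example of what the flag conversion buys: the work_port_lock critical section existed only to make the flag update and the FC_UNLOADING test appear atomic, and once HBA_RRQ_ACTIVE is an atomic bit the callback reduces to straight-line code. A sketch of the resulting timer-callback shape, with hypothetical names:

#include <linux/timer.h>
#include <linux/bitops.h>

#define DEMO_RRQ_ACTIVE	0

struct demo_hba {
	struct timer_list rrq_tmr;
	unsigned long flags;
	bool unloading;
};

static void demo_rrq_timeout(struct timer_list *t)
{
	struct demo_hba *hba = from_timer(hba, t, rrq_tmr);

	if (hba->unloading) {
		clear_bit(DEMO_RRQ_ACTIVE, &hba->flags);
		return;			/* no worker left to wake */
	}
	set_bit(DEMO_RRQ_ACTIVE, &hba->flags);
	/* ...wake the worker thread here... */
}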
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index c4172791c267..f6a53446e57f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -47,6 +47,18 @@
#include "lpfc_debugfs.h"
+/* When unloading, clear the deferred RSCN flag and return false;
+ * otherwise return whether RSCN processing is deferred.
+ */
+static bool
+lpfc_check_unload_and_clr_rscn(unsigned long *fc_flag)
+{
+ /* If unloading, then clear the FC_RSCN_DEFERRED flag */
+ if (test_bit(FC_UNLOADING, fc_flag)) {
+ clear_bit(FC_RSCN_DEFERRED, fc_flag);
+ return false;
+ }
+ return test_bit(FC_RSCN_DEFERRED, fc_flag);
+}
+
/* Called to verify a rcv'ed ADISC was intended for us. */
static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
@@ -213,8 +225,10 @@ void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(abort_list);
+ LIST_HEAD(drv_cmpl_list);
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
+ int retval = 0;
pring = lpfc_phba_elsring(phba);
@@ -250,11 +264,20 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
/* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
- spin_lock_irq(&phba->hbalock);
- list_del_init(&iocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
- spin_unlock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&iocb->dlist);
+ retval = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+ spin_unlock_irq(&phba->hbalock);
+
+ if (retval && test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
+ list_del_init(&iocb->list);
+ list_add_tail(&iocb->list, &drv_cmpl_list);
+ }
}
+
+ lpfc_sli_cancel_iocbs(phba, &drv_cmpl_list, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
@@ -481,7 +504,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* must have ACCed the remote NPorts FLOGI to us
* to make it here.
*/
- if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
+ if (test_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag))
lpfc_els_abort_flogi(phba);
ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
@@ -1604,10 +1627,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
- /* Don't do anything that will mess up processing of the
- * previous RSCN.
- */
- if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
+ /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */
+ if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding PLOGI */
@@ -1790,10 +1811,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
- /* Don't do anything that will mess up processing of the
- * previous RSCN.
- */
- if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
+ /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */
+ if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding ADISC */
@@ -2059,10 +2078,8 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- /* Don't do anything that will mess up processing of the
- * previous RSCN.
- */
- if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
+ /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */
+ if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag))
return ndlp->nlp_state;
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
@@ -2375,10 +2392,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
- /* Don't do anything that will mess up processing of the
- * previous RSCN.
- */
- if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
+ /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */
+ if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding PRLI */
@@ -2894,10 +2909,8 @@ static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- /* Don't do anything that will mess up processing of the
- * previous RSCN.
- */
- if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
+ /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */
+ if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag))
return ndlp->nlp_state;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index c5792eaf3f64..d70da2736c94 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -95,7 +95,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) ||
- vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
return -ENODEV;
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
@@ -272,7 +272,7 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
remoteport = lpfc_rport->remoteport;
if (!vport->localport ||
- vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
return -EINVAL;
lport = vport->localport->private;
@@ -569,7 +569,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
- if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ if (test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
return -ENODEV;
if (!vport->phba->sli4_hba.nvmels_wq)
@@ -675,7 +675,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
if (test_bit(FC_UNLOADING, &vport->load_flag) ||
- vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
return -ENODEV;
atomic_inc(&lport->fc4NvmeLsRequests);
@@ -1568,7 +1568,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) ||
- phba->hba_flag & HBA_IOQ_FLUSH) {
+ test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1909,24 +1909,19 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
- /* Guard against IO completion being called at same time */
- spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
-
- /* If the hba is getting reset, this flag is set. It is
- * cleared when the reset is complete and rings reestablished.
- */
- spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH) {
- spin_unlock(&phba->hbalock);
- spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
+ if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6139 Driver in reset cleanup - flushing "
- "NVME Req now. hba_flag x%x\n",
+ "NVME Req now. hba_flag x%lx\n",
phba->hba_flag);
return;
}
+ /* Guard against IO completion being called at same time */
+ spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
+ spin_lock(&phba->hbalock);
+
nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/*
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 561ced5503c6..5297cacc8beb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1811,7 +1811,9 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
ctxp->flag &= ~LPFC_NVME_XBUSY;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ spin_lock_irqsave(&phba->rrq_list_lock, iflag);
rrq_empty = list_empty(&phba->active_rrq_list);
+ spin_unlock_irqrestore(&phba->rrq_list_lock, iflag);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
if (ndlp &&
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -3393,14 +3395,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* If the hba is getting reset, this flag is set. It is
* cleared when the reset is complete and rings reestablished.
*/
- spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6163 Driver in reset cleanup - flushing "
- "NVME Req now. hba_flag x%x oxid x%x\n",
+ "NVME Req now. hba_flag x%lx oxid x%x\n",
phba->hba_flag, ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
spin_lock_irqsave(&ctxp->ctxlock, flags);
@@ -3409,6 +3409,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
return 0;
}
+ spin_lock_irqsave(&phba->hbalock, flags);
/* Outstanding abort is in progress */
if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
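This nvmet hunk is one of several consumers of the rrq_list_lock introduced by this series (initialized in lpfc_pci_probe_one_s4 above): active_rrq_list moves out from under the coarse hbalock onto its own spinlock, shrinking hbalock contention. A reduced sketch of the split, using a cut-down structure:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_hba {
	spinlock_t hbalock;		/* still covers everything else */
	spinlock_t rrq_list_lock;	/* now covers only the RRQ list */
	struct list_head active_rrq_list;
};

static bool demo_rrq_list_empty(struct demo_hba *hba)
{
	unsigned long iflag;
	bool empty;

	spin_lock_irqsave(&hba->rrq_list_lock, iflag);
	empty = list_empty(&hba->active_rrq_list);
	spin_unlock_irqrestore(&hba->rrq_list_lock, iflag);
	return empty;
}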
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4a6e5223a224..98ce9d97a225 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -474,9 +474,11 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
ndlp = psb->rdata->pnode;
else
ndlp = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ spin_lock_irqsave(&phba->rrq_list_lock, iflag);
rrq_empty = list_empty(&phba->active_rrq_list);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ spin_unlock_irqrestore(&phba->rrq_list_lock, iflag);
if (ndlp && !offline) {
lpfc_set_rrq_active(phba, ndlp,
psb->cur_iocbq.sli4_lxritag, rxid, 1);
@@ -598,7 +600,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
{
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_sli4_hdw_queue *qp;
- struct sli4_sge *sgl;
+ struct sli4_sge_le *sgl;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
uint32_t cpu, idx;
@@ -649,23 +651,23 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* The balance are sg list bdes. Initialize the
* first two and leave the rest for queuecommand.
*/
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+ sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+ bf_set_le32(lpfc_sli4_sge_last, sgl, 0);
+ if (cmnd && cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd32));
+ else
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+
sgl++;
/* Setup the physical region for the FCP RSP */
- pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+ pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd32);
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- sgl->word2 = cpu_to_le32(sgl->word2);
+ bf_set_le32(lpfc_sli4_sge_last, sgl, 1);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
@@ -2606,7 +2608,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
iocb_cmd->ulpLe = 1;
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
- fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+ fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
/*
* Due to difference in data length between DIF/non-DIF paths,
@@ -3223,14 +3225,18 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* explicitly reinitialized.
* all iocb memory resources are reused.
*/
- fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+ if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+ ((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl =
+ cpu_to_be32(scsi_bufflen(scsi_cmnd));
+ else
+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
/* Set first-burst provided it was successfully negotiated */
- if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
vport->cfg_first_burst_size &&
scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
u32 init_len, total_len;
- total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ total_len = scsi_bufflen(scsi_cmnd);
init_len = min(total_len, vport->cfg_first_burst_size);
/* Word 4 & 5 */
@@ -3418,15 +3424,18 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
}
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
- fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+ if (lpfc_cmd->pCmd->cmd_len > LPFC_FCP_CDB_LEN)
+ ((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl = cpu_to_be32(fcpdl);
+ else
+ fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
/* Set first-burst provided it was successfully negotiated */
- if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
vport->cfg_first_burst_size &&
scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
u32 init_len, total_len;
- total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ total_len = fcpdl;
init_len = min(total_len, vport->cfg_first_burst_size);
/* Word 4 & 5 */
@@ -3434,8 +3443,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
wqe->fcp_iwrite.total_xfer_len = total_len;
} else {
/* Word 4 */
- wqe->fcp_iwrite.total_xfer_len =
- be32_to_cpu(fcp_cmnd->fcpDl);
+ wqe->fcp_iwrite.total_xfer_len = fcpdl;
}
/*
@@ -3892,7 +3900,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
fcprsp->rspInfo3);
scsi_set_resid(cmnd, 0);
- fcpDl = be32_to_cpu(fcpcmd->fcpDl);
+ if (cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+ fcpDl = be32_to_cpu(((struct fcp_cmnd32 *)fcpcmd)->fcpDl);
+ else
+ fcpDl = be32_to_cpu(fcpcmd->fcpDl);
if (resp_info & RESID_UNDER) {
scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
@@ -4721,6 +4732,14 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
LPFC_WQE_IOD_NONE);
}
+
+	/* Additional FCP CDB length field calculation.
+	 * The CDB bytes beyond the normal LPFC_FCP_CDB_LEN amount to
+	 * (LPFC_FCP_CDB_LEN_32 - LPFC_FCP_CDB_LEN) / 4 = 4 extra words,
+	 * shifted left by 2 to skip the RDDATA/WRDATA bits of fcpCntl3.
+	 */
+ if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+ fcp_cmnd->fcpCntl3 |= 4 << 2;
} else {
/* From the icmnd template, initialize words 4 - 11 */
memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
@@ -4741,7 +4760,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
/* Word 3 */
bf_set(payload_offset_len, &wqe->fcp_icmd,
- sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+ sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp));
/* Word 6 */
bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
@@ -4796,7 +4815,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
- ptr = &fcp_cmnd->fcpCdb[0];
+ ptr = &((struct fcp_cmnd32 *)fcp_cmnd)->fcpCdb[0];
memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
ptr += scsi_cmnd->cmd_len;
@@ -5041,7 +5060,7 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
/* Check for valid Emulex Device ID */
if (phba->sli_rev != LPFC_SLI_REV4 ||
- phba->hba_flag & HBA_FCOE_MODE) {
+ test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8347 Incapable PCI reset device: "
"0x%04x\n", ptr->device);
@@ -5327,7 +5346,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
cmnd->cmnd[0],
scsi_prot_ref_tag(cmnd),
scsi_logical_block_count(cmnd),
- (cmnd->cmnd[1]>>5));
+ scsi_get_prot_type(cmnd));
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
@@ -5518,7 +5537,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH) {
+ if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index eae56944f31b..a05d203e4777 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -24,6 +24,7 @@
struct lpfc_hba;
#define LPFC_FCP_CDB_LEN 16
+#define LPFC_FCP_CDB_LEN_32 32
#define list_remove_head(list, entry, type, member) \
do { \
@@ -99,17 +100,11 @@ struct fcp_rsp {
#define SNSCOD_BADCMD 0x20 /* sense code is byte 13 ([12]) */
};
-struct fcp_cmnd {
- struct scsi_lun fcp_lun;
-
- uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
- uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
#define SIMPLE_Q 0x00
#define HEAD_OF_Q 0x01
#define ORDERED_Q 0x02
#define ACA_Q 0x04
#define UNTAGGED 0x05
- uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */
#define FCP_ABORT_TASK_SET 0x02 /* Bit 1 */
#define FCP_CLEAR_TASK_SET 0x04 /* bit 2 */
#define FCP_BUS_RESET 0x08 /* bit 3 */
@@ -117,12 +112,31 @@ struct fcp_cmnd {
#define FCP_TARGET_RESET 0x20 /* bit 5 */
#define FCP_CLEAR_ACA 0x40 /* bit 6 */
#define FCP_TERMINATE_TASK 0x80 /* bit 7 */
- uint8_t fcpCntl3;
#define WRITE_DATA 0x01 /* Bit 0 */
#define READ_DATA 0x02 /* Bit 1 */
+struct fcp_cmnd {
+ struct scsi_lun fcp_lun;
+
+ uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
+ uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
+ uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */
+ uint8_t fcpCntl3;
+
uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
- uint32_t fcpDl; /* Total transfer length */
+ __be32 fcpDl; /* Total transfer length */
+
+};
+struct fcp_cmnd32 {
+ struct scsi_lun fcp_lun;
+
+ uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
+ uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
+ uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */
+ uint8_t fcpCntl3;
+
+ uint8_t fcpCdb[LPFC_FCP_CDB_LEN_32]; /* SRB cdb field is copied here */
+ __be32 fcpDl; /* Total transfer length */
};
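With scsi_lun occupying 8 bytes, the rearranged structures work out to 32 and 48 bytes, which is the delta behind the sizeof(struct fcp_cmnd32) changes in the buffer sizing and payload_offset_len hunks elsewhere in the patch. A compile-time sanity check of that arithmetic (the sizes are this sketch's own assumption, not asserted by the patch itself):

#include <linux/build_bug.h>

static inline void demo_check_fcp_iu_sizes(void)
{
	/* 8 (fcp_lun) + 4 (fcpCntl0..3) + CDB bytes + 4 (fcpDl) */
	BUILD_BUG_ON(sizeof(struct fcp_cmnd)   != 32);
	BUILD_BUG_ON(sizeof(struct fcp_cmnd32) != 48);
}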
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a028e008dd1e..f475e7ece41a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1024,9 +1024,9 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
unsigned long iflags;
LIST_HEAD(send_rrq);
- spin_lock_irqsave(&phba->hbalock, iflags);
- phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ spin_lock_irqsave(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
if (time_after(jiffies, rrq->rrq_stop_time))
@@ -1034,7 +1034,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
else if (time_before(rrq->rrq_stop_time, next_time))
next_time = rrq->rrq_stop_time;
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
if ((!list_empty(&phba->active_rrq_list)) &&
(!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
mod_timer(&phba->rrq_tmr, next_time);
@@ -1072,16 +1072,16 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
if (phba->sli_rev != LPFC_SLI_REV4)
return NULL;
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irqsave(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
if (rrq->vport == vport && rrq->xritag == xri &&
rrq->nlp_DID == did){
list_del(&rrq->list);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
return rrq;
}
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
return NULL;
}
@@ -1109,7 +1109,7 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_sli4_vport_delete_els_xri_aborted(vport);
lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
}
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irqsave(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
if (rrq->vport != vport)
continue;
@@ -1118,7 +1118,7 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_move(&rrq->list, &rrq_list);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
list_del(&rrq->list);
@@ -1179,12 +1179,12 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (!phba->cfg_enable_rrq)
return -EINVAL;
- spin_lock_irqsave(&phba->hbalock, iflags);
if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
- phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- goto out;
+ clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
+ goto outnl;
}
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
goto out;
@@ -1213,16 +1213,18 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
- spin_lock_irqsave(&phba->hbalock, iflags);
+
+ spin_lock_irqsave(&phba->rrq_list_lock, iflags);
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
- phba->hba_flag |= HBA_RRQ_ACTIVE;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
+ set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
if (empty)
lpfc_worker_wake_up(phba);
return 0;
out:
spin_unlock_irqrestore(&phba->hbalock, iflags);
+outnl:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2921 Can't set rrq active xri:0x%x rxid:0x%x"
" DID:0x%x Send:%d\n",
@@ -3937,7 +3939,7 @@ void lpfc_poll_eratt(struct timer_list *t)
uint64_t sli_intr, cnt;
phba = from_timer(phba, t, eratt_poll);
- if (!(phba->hba_flag & HBA_SETUP))
+ if (!test_bit(HBA_SETUP, &phba->hba_flag))
return;
if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
@@ -4522,9 +4524,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
unsigned long iflag;
int count = 0;
- spin_lock_irqsave(&phba->hbalock, iflag);
- phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
/* Get the response iocb from the head of work queue */
spin_lock_irqsave(&phba->hbalock, iflag);
@@ -4681,10 +4681,8 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
uint32_t i;
struct lpfc_iocbq *piocb, *next_iocb;
- spin_lock_irq(&phba->hbalock);
/* Indicate the I/O queues are flushed */
- phba->hba_flag |= HBA_IOQ_FLUSH;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
@@ -4762,7 +4760,7 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
if (lpfc_readl(phba->HSregaddr, &status))
return 1;
- phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+ set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
/*
* Check status register every 100ms for 5 retries, then every
@@ -4841,7 +4839,7 @@ lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
} else
phba->sli4_hba.intr_enable = 0;
- phba->hba_flag &= ~HBA_SETUP;
+ clear_bit(HBA_SETUP, &phba->hba_flag);
return retval;
}
@@ -5093,7 +5091,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
/* perform board reset */
phba->fc_eventTag = 0;
phba->link_events = 0;
- phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+ set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
if (phba->pport) {
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
@@ -5153,7 +5151,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0295 Reset HBA Data: x%x x%x x%x\n",
+ "0295 Reset HBA Data: x%x x%x x%lx\n",
phba->pport->port_state, psli->sli_flag,
phba->hba_flag);
@@ -5162,7 +5160,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->link_events = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
- phba->hba_flag &= ~HBA_SETUP;
+ clear_bit(HBA_SETUP, &phba->hba_flag);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~(LPFC_PROCESS_LA);
@@ -5406,7 +5404,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return -EIO;
}
- phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+ set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
@@ -5708,11 +5706,11 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
int longs;
/* Enable ISR already does config_port because of config_msi mbx */
- if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
+ if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) {
rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
if (rc)
return -EIO;
- phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
+ clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
}
phba->fcp_embed_io = 0; /* SLI4 FC support only */
@@ -7759,7 +7757,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
snprintf(mbox->u.mqe.un.set_host_data.un.data,
LPFC_HOST_OS_DRIVER_VERSION_SIZE,
"Linux %s v"LPFC_DRIVER_VERSION,
- (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+ test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC");
}
int
@@ -8487,7 +8485,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
}
- phba->hba_flag &= ~HBA_SETUP;
+ clear_bit(HBA_SETUP, &phba->hba_flag);
lpfc_sli4_dip(phba);
@@ -8516,25 +8514,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
mqe = &mboxq->u.mqe;
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
- phba->hba_flag |= HBA_FCOE_MODE;
+ set_bit(HBA_FCOE_MODE, &phba->hba_flag);
phba->fcp_embed_io = 0; /* SLI4 FC support only */
} else {
- phba->hba_flag &= ~HBA_FCOE_MODE;
+ clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
}
if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
LPFC_DCBX_CEE_MODE)
- phba->hba_flag |= HBA_FIP_SUPPORT;
+ set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
else
- phba->hba_flag &= ~HBA_FIP_SUPPORT;
+ clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
- phba->hba_flag &= ~HBA_IOQ_FLUSH;
+ clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
if (phba->sli_rev != LPFC_SLI_REV4) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0376 READ_REV Error. SLI Level %d "
"FCoE enabled %d\n",
- phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
+ phba->sli_rev,
+ test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0);
rc = -EIO;
kfree(vpd);
goto out_free_mbox;
@@ -8549,7 +8548,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* to read FCoE param config regions, only read parameters if the
* board is FCoE
*/
- if (phba->hba_flag & HBA_FCOE_MODE &&
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
lpfc_sli4_read_fcoe_params(phba))
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters\n");
@@ -8626,7 +8625,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc == MBX_SUCCESS) {
- phba->hba_flag |= HBA_RECOVERABLE_UE;
+ set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag);
/* Set 1Sec interval to detect UE */
phba->eratt_poll_interval = 1;
phba->sli4_hba.ue_to_sr = bf_get(
@@ -8677,7 +8676,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
/* Performance Hints are ONLY for FCoE */
- if (phba->hba_flag & HBA_FCOE_MODE) {
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
else
@@ -8936,7 +8935,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
lpfc_sli4_node_prep(phba);
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
/*
* The FC Port needs to register FCFI (index 0)
@@ -9012,7 +9011,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
+ clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
+ clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
/* start eq_delay heartbeat */
@@ -9054,8 +9054,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Setup CMF after HBA is initialized */
lpfc_cmf_setup(phba);
- if (!(phba->hba_flag & HBA_FCOE_MODE) &&
- (phba->hba_flag & LINK_DISABLED)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
+ test_bit(LINK_DISABLED, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3103 Adapter Link is disabled.\n");
lpfc_down_link(phba, mboxq);
@@ -9079,7 +9079,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
- phba->hba_flag |= HBA_SETUP;
+ set_bit(HBA_SETUP, &phba->hba_flag);
return rc;
out_io_buff_free:
@@ -9383,7 +9383,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
/* If HBA has a deferred error attention, fail the iocb. */
- if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
goto out_not_finished;
}
@@ -10447,7 +10447,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
/* If HBA has a deferred error attention, fail the iocb. */
- if (unlikely(phba->hba_flag & DEFER_ERATT))
+ if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
return IOCB_ERROR;
/*
@@ -10595,18 +10595,18 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
BUFF_TYPE_BDE_IMMED;
wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
+ wqe->generic.bde.addrLow = 72; /* Word 18 */
bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+ /* Word 18-29 FCP CMND Payload */
+ ptr = &wqe->words[18];
+ memcpy(ptr, fcp_cmnd, sgl->sge_len);
} else {
/* Word 0-2 - Inline BDE */
wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
wqe->generic.bde.addrHigh = sgl->addr_hi;
wqe->generic.bde.addrLow = sgl->addr_lo;
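This hunk relocates the embedded FCP command payload from WQE word 22 to word 18 and sizes both the copy and the BDE from the SGE length instead of a fixed sizeof(struct fcp_cmnd), so the 16-byte and the newer 32-byte CDB command formats both fit. A hedged sketch of the copy, offsets and names being illustrative:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_CMND_WORD	18	/* first WQE word carrying the payload */

static void demo_embed_cmnd(u32 *wqe_words, const void *fcp_cmnd, u32 sge_len)
{
	/* sge_len covers whichever command format the buffer holds */
	memcpy(&wqe_words[DEMO_CMND_WORD], fcp_cmnd, sge_len);
}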
@@ -12361,10 +12361,10 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0139 Ignoring ELS cmd code x%x completion Data: "
+ "0139 Ignoring ELS cmd code x%x ref cnt x%x Data: "
"x%x x%x x%x x%px\n",
- ulp_command, ulp_status, ulp_word4, iotag,
- cmdiocb->ndlp);
+ ulp_command, kref_read(&cmdiocb->ndlp->kref),
+ ulp_status, ulp_word4, iotag, cmdiocb->ndlp);
/*
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy.
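The reworked message above folds the node reference count into the log line via kref_read(), which samples the count without taking a reference; that is sufficient for diagnostics. A tiny sketch with made-up names:

#include <linux/kref.h>
#include <linux/printk.h>

static void demo_log_refcount(const char *tag, struct kref *kref)
{
	pr_info("%s ref cnt x%x\n", tag, kref_read(kref));
}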
@@ -12460,7 +12460,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- if (phba->link_state < LPFC_LINK_UP ||
+ /* Just close the exchange under certain conditions. */
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+ phba->link_state < LPFC_LINK_UP ||
(phba->sli_rev == LPFC_SLI_REV4 &&
phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
(phba->link_flag & LS_EXTERNAL_LOOPBACK))
@@ -12507,10 +12509,10 @@ abort_iotag_exit:
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort IO XRI x%x, Original iotag x%x, "
"abort tag x%x Cmdjob : x%px Abortjob : x%px "
- "retval x%x\n",
+ "retval x%x : IA %d\n",
ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
- retval);
+ retval, ia);
if (retval) {
cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -12775,7 +12777,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
int i;
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH)
+ if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
return errcnt;
for (i = 1; i <= phba->sli.last_iotag; i++) {
@@ -12845,15 +12847,13 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
bool ia;
- spin_lock_irqsave(&phba->hbalock, iflags);
-
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
return 0;
- }
+
sum = 0;
+ spin_lock_irqsave(&phba->hbalock, iflags);
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
@@ -13385,7 +13385,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
if ((HS_FFER1 & phba->work_hs) &&
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
- phba->hba_flag |= DEFER_ERATT;
+ set_bit(DEFER_ERATT, &phba->hba_flag);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr);
@@ -13394,7 +13394,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
/* Set the driver HA work bitmap */
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
- phba->hba_flag |= HBA_ERATT_HANDLED;
+ set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
return 1;
}
return 0;
@@ -13405,7 +13405,7 @@ unplug_err:
/* Set the driver HA work bitmap */
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
- phba->hba_flag |= HBA_ERATT_HANDLED;
+ set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
return 1;
}
@@ -13441,7 +13441,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
&uerr_sta_hi)) {
phba->work_hs |= UNPLUG_ERR;
phba->work_ha |= HA_ERATT;
- phba->hba_flag |= HBA_ERATT_HANDLED;
+ set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
return 1;
}
if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
@@ -13457,7 +13457,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
phba->work_status[0] = uerr_sta_lo;
phba->work_status[1] = uerr_sta_hi;
phba->work_ha |= HA_ERATT;
- phba->hba_flag |= HBA_ERATT_HANDLED;
+ set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
return 1;
}
break;
@@ -13469,7 +13469,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
&portsmphr)){
phba->work_hs |= UNPLUG_ERR;
phba->work_ha |= HA_ERATT;
- phba->hba_flag |= HBA_ERATT_HANDLED;
+ set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
return 1;
}
if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
@@ -13492,7 +13492,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
phba->work_status[0],
phba->work_status[1]);
phba->work_ha |= HA_ERATT;
- phba->hba_flag |= HBA_ERATT_HANDLED;
+ set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
return 1;
}
break;
@@ -13529,22 +13529,18 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
return 0;
/* Check if interrupt handler handles this ERATT */
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_ERATT_HANDLED) {
+ if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
/* Interrupt handler has handled ERATT */
- spin_unlock_irq(&phba->hbalock);
return 0;
- }
/*
* If there is deferred error attention, do not check for error
* attention
*/
- if (unlikely(phba->hba_flag & DEFER_ERATT)) {
- spin_unlock_irq(&phba->hbalock);
+ if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
return 0;
- }
+ spin_lock_irq(&phba->hbalock);
/* If PCI channel is offline, don't process it */
if (unlikely(pci_channel_offline(phba->pcidev))) {
spin_unlock_irq(&phba->hbalock);
@@ -13666,19 +13662,17 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
ha_copy &= ~HA_ERATT;
/* Check the need for handling ERATT in interrupt handler */
if (ha_copy & HA_ERATT) {
- if (phba->hba_flag & HBA_ERATT_HANDLED)
+ if (test_and_set_bit(HBA_ERATT_HANDLED,
+ &phba->hba_flag))
/* ERATT polling has handled ERATT */
ha_copy &= ~HA_ERATT;
- else
- /* Indicate interrupt handler handles ERATT */
- phba->hba_flag |= HBA_ERATT_HANDLED;
}
/*
* If there is deferred error attention, do not check for any
* interrupt.
*/
- if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
@@ -13774,7 +13768,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
HS_FFER6 | HS_FFER7 | HS_FFER8) &
phba->work_hs)) {
- phba->hba_flag |= DEFER_ERATT;
+ set_bit(DEFER_ERATT, &phba->hba_flag);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr);
@@ -13961,16 +13955,16 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
/* Need to read HA REG for FCP ring and other ring events */
if (lpfc_readl(phba->HAregaddr, &ha_copy))
return IRQ_HANDLED;
- /* Clear up only attention source related to fast-path */
- spin_lock_irqsave(&phba->hbalock, iflag);
+
/*
* If there is deferred error attention, do not check for
* any interrupt.
*/
- if (unlikely(phba->hba_flag & DEFER_ERATT)) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
return IRQ_NONE;
- }
+
+ /* Clear up only attention source related to fast-path */
+ spin_lock_irqsave(&phba->hbalock, iflag);
writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
@@ -14053,18 +14047,15 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
spin_unlock(&phba->hbalock);
return IRQ_NONE;
} else if (phba->ha_copy & HA_ERATT) {
- if (phba->hba_flag & HBA_ERATT_HANDLED)
+ if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
/* ERATT polling has handled ERATT */
phba->ha_copy &= ~HA_ERATT;
- else
- /* Indicate interrupt handler handles ERATT */
- phba->hba_flag |= HBA_ERATT_HANDLED;
}
/*
* If there is deferred error attention, do not check for any interrupt.
*/
- if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
spin_unlock(&phba->hbalock);
return IRQ_NONE;
}
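Both interrupt-path hunks in this area collapse a "test the flag, then set it" sequence that previously relied on hbalock into a single test_and_set_bit(). A sketch of the claim pattern, with an illustrative bit number:

#include <linux/bitops.h>

#define DEMO_ERATT_HANDLED	3	/* illustrative; not lpfc's value */

/* Returns true only for the first context that claims the event. */
static bool demo_claim_eratt(unsigned long *flags)
{
	return !test_and_set_bit(DEMO_ERATT_HANDLED, flags);
}

Since the read and the write happen in one atomic operation, the interrupt handler and the poller can no longer both conclude that the error attention is unhandled.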
@@ -14135,9 +14126,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
unsigned long iflags;
/* First, declare the els xri abort event has been handled */
- spin_lock_irqsave(&phba->hbalock, iflags);
- phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
/* Now, handle all the els xri abort events */
spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
@@ -14263,9 +14252,7 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
/* Set the async event flag */
- spin_lock_irqsave(&phba->hbalock, iflags);
- phba->hba_flag |= ASYNC_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ set_bit(ASYNC_EVENT, &phba->hba_flag);
return true;
}
@@ -14505,8 +14492,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&irspiocbq->cq_event.list,
&phba->sli4_hba.sp_queue_event);
- phba->hba_flag |= HBA_SP_QUEUE_EVT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
+ set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
return true;
}
@@ -14580,7 +14567,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Set the els xri abort event flag */
- phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+ set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
iflags);
workposted = true;
@@ -14667,9 +14654,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
/* save off the frame for the work thread to process */
list_add_tail(&dma_buf->cq_event.list,
&phba->sli4_hba.sp_queue_event);
- /* Frame received */
- phba->hba_flag |= HBA_SP_QUEUE_EVT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
+ /* Frame received */
+ set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
workposted = true;
break;
case FC_STATUS_INSUFF_BUF_FRM_DISC:
@@ -14689,9 +14676,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
- spin_lock_irqsave(&phba->hbalock, iflags);
- phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
workposted = true;
break;
case FC_STATUS_RQ_DMA_FAILURE:
@@ -19349,8 +19334,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&dmabuf->cq_event.list,
&phba->sli4_hba.sp_queue_event);
- phba->hba_flag |= HBA_SP_QUEUE_EVT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
+ set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
lpfc_worker_wake_up(phba);
return;
}
@@ -20102,9 +20087,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
mboxq->vport = phba->pport;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FCF_TS_INPROG, &phba->hba_flag);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
@@ -20120,9 +20103,7 @@ fail_fcf_scan:
if (mboxq)
lpfc_sli4_mbox_cmd_free(phba, mboxq);
/* FCF scan failed, clear FCF_TS_INPROG flag */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
}
return error;
}
@@ -20779,7 +20760,7 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
/* This HBA contains PORT_STE configured */
if (!rgn23_data[offset + 2])
- phba->hba_flag |= LINK_DISABLED;
+ set_bit(LINK_DISABLED, &phba->hba_flag);
goto out;
}
@@ -22488,7 +22469,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
}
tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
- sizeof(struct fcp_cmnd));
+ sizeof(struct fcp_cmnd32));
spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
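The sizeof() change above places the FCP response buffer after the larger fcp_cmnd32 structure, so a buffer carved from one DMA allocation leaves room for either command format in front of the response. A sketch under that reading, with made-up types:

#include <linux/types.h>

struct demo_cmnd   { u8 cdb[16]; /* ... */ };
struct demo_cmnd32 { u8 cdb[32]; /* ... */ };

static void *demo_rsp_buf(void *cmnd_buf)
{
	/* reserve space for the largest command format in front */
	return (u8 *)cmnd_buf + sizeof(struct demo_cmnd32);
}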
@@ -22593,12 +22574,13 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
u8 cmnd;
u32 *pcmd;
u32 if_type = 0;
- u32 fip, abort_tag;
+ u32 abort_tag;
+ bool fip;
struct lpfc_nodelist *ndlp = NULL;
union lpfc_wqe128 *wqe = &job->wqe;
u8 command_type = ELS_COMMAND_NON_FIP;
- fip = phba->hba_flag & HBA_FIP_SUPPORT;
+ fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
/* The fcp commands will set command type */
if (job->cmd_flag & LPFC_IO_FCP)
command_type = FCP_COMMAND;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 915f2f11fb55..f06087e47859 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.1"
+#define LPFC_DRIVER_VERSION "14.4.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 181f16899fdc..a402c4dc4645 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -534,7 +534,13 @@ static void __exit mac_scsi_remove(struct platform_device *pdev)
scsi_host_put(instance);
}
-static struct platform_driver mac_scsi_driver = {
+/*
+ * mac_scsi_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver mac_scsi_driver __refdata = {
.remove_new = __exit_p(mac_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
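The new comment block documents the trick; a standalone sketch of the same pattern follows, with placeholder driver name and callbacks. module_platform_driver_probe() registers via platform_driver_probe(), which forbids runtime unbinding, so the remove callback is only reachable at module exit, when .exit.text is still mapped:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void __exit demo_remove(struct platform_device *pdev)
{
}

/* __refdata: the .exit.text reference is intentional, silence modpost */
static struct platform_driver demo_driver __refdata = {
	.remove_new = __exit_p(demo_remove),
	.driver = {
		.name = "demo",
	},
};

module_platform_driver_probe(demo_driver, demo_probe);
MODULE_DESCRIPTION("sketch of the __refdata/__exit_p pattern");
MODULE_LICENSE("GPL");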
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index 3f2ce1eb081c..56b76d73895b 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -3,85 +3,84 @@ config MEGARAID_NEWGEN
bool "LSI Logic New Generation RAID Device Drivers"
depends on PCI && HAS_IOPORT && SCSI
help
- LSI Logic RAID Device Drivers
+ LSI Logic RAID Device Drivers
config MEGARAID_MM
tristate "LSI Logic Management Module (New Driver)"
depends on PCI && HAS_IOPORT && SCSI && MEGARAID_NEWGEN
help
- Management Module provides ioctl, sysfs support for LSI Logic
- RAID controllers.
- To compile this driver as a module, choose M here: the
- module will be called megaraid_mm
+ Management Module provides ioctl, sysfs support for LSI Logic
+ RAID controllers.
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid_mm
config MEGARAID_MAILBOX
tristate "LSI Logic MegaRAID Driver (New Driver)"
depends on PCI && SCSI && MEGARAID_MM
help
- List of supported controllers
+ List of supported controllers
- OEM Product Name VID :DID :SVID:SSID
- --- ------------ ---- ---- ---- ----
- Dell PERC3/QC 101E:1960:1028:0471
- Dell PERC3/DC 101E:1960:1028:0493
- Dell PERC3/SC 101E:1960:1028:0475
- Dell PERC3/Di 1028:000E:1028:0123
- Dell PERC4/SC 1000:1960:1028:0520
- Dell PERC4/DC 1000:1960:1028:0518
- Dell PERC4/QC 1000:0407:1028:0531
- Dell PERC4/Di 1028:000F:1028:014A
- Dell PERC 4e/Si 1028:0013:1028:016c
- Dell PERC 4e/Di 1028:0013:1028:016d
- Dell PERC 4e/Di 1028:0013:1028:016e
- Dell PERC 4e/Di 1028:0013:1028:016f
- Dell PERC 4e/Di 1028:0013:1028:0170
- Dell PERC 4e/DC 1000:0408:1028:0002
- Dell PERC 4e/SC 1000:0408:1028:0001
- LSI MegaRAID SCSI 320-0 1000:1960:1000:A520
- LSI MegaRAID SCSI 320-1 1000:1960:1000:0520
- LSI MegaRAID SCSI 320-2 1000:1960:1000:0518
- LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530
- LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532
- LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531
- LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001
- LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002
- LSI MegaRAID SATA 150-4 1000:1960:1000:4523
- LSI MegaRAID SATA 150-6 1000:1960:1000:0523
- LSI MegaRAID SATA 300-4X 1000:0409:1000:3004
- LSI MegaRAID SATA 300-8X 1000:0409:1000:3008
- INTEL RAID Controller SRCU42X 1000:0407:8086:0532
- INTEL RAID Controller SRCS16 1000:1960:8086:0523
- INTEL RAID Controller SRCU42E 1000:0408:8086:0002
- INTEL RAID Controller SRCZCRX 1000:0407:8086:0530
- INTEL RAID Controller SRCS28X 1000:0409:8086:3008
- INTEL RAID Controller SROMBU42E 1000:0408:8086:3431
- INTEL RAID Controller SROMBU42E 1000:0408:8086:3499
- INTEL RAID Controller SRCU51L 1000:1960:8086:0520
- FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065
- ACER MegaRAID ROMB-2E 1000:0408:1025:004D
- NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287
+ OEM Product Name VID :DID :SVID:SSID
+ --- ------------ ---- ---- ---- ----
+ Dell PERC3/QC 101E:1960:1028:0471
+ Dell PERC3/DC 101E:1960:1028:0493
+ Dell PERC3/SC 101E:1960:1028:0475
+ Dell PERC3/Di 1028:000E:1028:0123
+ Dell PERC4/SC 1000:1960:1028:0520
+ Dell PERC4/DC 1000:1960:1028:0518
+ Dell PERC4/QC 1000:0407:1028:0531
+ Dell PERC4/Di 1028:000F:1028:014A
+ Dell PERC 4e/Si 1028:0013:1028:016c
+ Dell PERC 4e/Di 1028:0013:1028:016d
+ Dell PERC 4e/Di 1028:0013:1028:016e
+ Dell PERC 4e/Di 1028:0013:1028:016f
+ Dell PERC 4e/Di 1028:0013:1028:0170
+ Dell PERC 4e/DC 1000:0408:1028:0002
+ Dell PERC 4e/SC 1000:0408:1028:0001
+ LSI MegaRAID SCSI 320-0 1000:1960:1000:A520
+ LSI MegaRAID SCSI 320-1 1000:1960:1000:0520
+ LSI MegaRAID SCSI 320-2 1000:1960:1000:0518
+ LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530
+ LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532
+ LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531
+ LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001
+ LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002
+ LSI MegaRAID SATA 150-4 1000:1960:1000:4523
+ LSI MegaRAID SATA 150-6 1000:1960:1000:0523
+ LSI MegaRAID SATA 300-4X 1000:0409:1000:3004
+ LSI MegaRAID SATA 300-8X 1000:0409:1000:3008
+ INTEL RAID Controller SRCU42X 1000:0407:8086:0532
+ INTEL RAID Controller SRCS16 1000:1960:8086:0523
+ INTEL RAID Controller SRCU42E 1000:0408:8086:0002
+ INTEL RAID Controller SRCZCRX 1000:0407:8086:0530
+ INTEL RAID Controller SRCS28X 1000:0409:8086:3008
+ INTEL RAID Controller SROMBU42E 1000:0408:8086:3431
+ INTEL RAID Controller SROMBU42E 1000:0408:8086:3499
+ INTEL RAID Controller SRCU51L 1000:1960:8086:0520
+ FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065
+ ACER MegaRAID ROMB-2E 1000:0408:1025:004D
+ NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287
- To compile this driver as a module, choose M here: the
- module will be called megaraid_mbox
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid_mbox
config MEGARAID_LEGACY
tristate "LSI Logic Legacy MegaRAID Driver"
depends on PCI && HAS_IOPORT && SCSI
help
- This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490
- and 467 SCSI host adapters. This driver also support the all U320
- RAID controllers
+ This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490
+ and 467 SCSI host adapters. This driver also support the all U320
+ RAID controllers
- To compile this driver as a module, choose M here: the
- module will be called megaraid
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid
config MEGARAID_SAS
tristate "LSI Logic MegaRAID SAS RAID Module"
depends on PCI && SCSI
select IRQ_POLL
help
- Module for LSI Logic's SAS based RAID controllers.
- To compile this driver as a module, choose 'm' here.
- Module will be called megaraid_sas
-
+ Module for LSI Logic's SAS based RAID controllers.
+ To compile this driver as a module, choose 'm' here.
+ Module will be called megaraid_sas
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 56624cbf7fa5..5680c6cdb221 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2701,7 +2701,7 @@ int megasas_get_ctrl_info(struct megasas_instance *instance);
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
- bool is_target_prop);
+ struct queue_limits *lim, bool is_target_prop);
int megasas_get_target_prop(struct megasas_instance *instance,
struct scsi_device *sdev);
void megasas_get_snapdump_properties(struct megasas_instance *instance);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3d4f13da1ae8..def0d905b6d9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1888,7 +1888,7 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
* Returns void
*/
void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
- bool is_target_prop)
+ struct queue_limits *lim, bool is_target_prop)
{
u16 pd_index = 0, ld;
u32 device_id;
@@ -1915,8 +1915,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
return;
raid = MR_LdRaidGet(ld, local_map_ptr);
- if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+ if (lim)
+ lim->dma_alignment = 0x7;
+ }
mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
@@ -1967,7 +1969,8 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
*
*/
static inline void
-megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
+megasas_set_nvme_device_properties(struct scsi_device *sdev,
+ struct queue_limits *lim, u32 max_io_size)
{
struct megasas_instance *instance;
u32 mr_nvme_pg_size;
@@ -1976,10 +1979,10 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
MR_DEFAULT_NVME_PAGE_SIZE);
- blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
+ lim->max_hw_sectors = max_io_size / 512;
+ lim->virt_boundary_mask = mr_nvme_pg_size - 1;
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
- blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
}
/*
@@ -2041,7 +2044,7 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
* @is_target_prop true, if fw provided target properties.
*/
static void megasas_set_static_target_properties(struct scsi_device *sdev,
- bool is_target_prop)
+ struct queue_limits *lim, bool is_target_prop)
{
u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
struct megasas_instance *instance;
@@ -2060,13 +2063,15 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
if (instance->nvme_page_size && max_io_size_kb)
- megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
+ megasas_set_nvme_device_properties(sdev, lim,
+ max_io_size_kb << 10);
megasas_set_fw_assisted_qd(sdev, is_target_prop);
}
-static int megasas_slave_configure(struct scsi_device *sdev)
+static int megasas_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
u16 pd_index = 0;
struct megasas_instance *instance;
@@ -2096,10 +2101,10 @@ static int megasas_slave_configure(struct scsi_device *sdev)
ret_target_prop = megasas_get_target_prop(instance, sdev);
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
- megasas_set_static_target_properties(sdev, is_target_prop);
+ megasas_set_static_target_properties(sdev, lim, is_target_prop);
/* This sdev property may change post OCR */
- megasas_set_dynamic_target_properties(sdev, is_target_prop);
+ megasas_set_dynamic_target_properties(sdev, lim, is_target_prop);
mutex_unlock(&instance->reset_mutex);
@@ -3507,7 +3512,7 @@ static const struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
.name = "Avago SAS based MegaRAID driver",
.proc_name = "megaraid_sas",
- .slave_configure = megasas_slave_configure,
+ .device_configure = megasas_device_configure,
.slave_alloc = megasas_slave_alloc,
.slave_destroy = megasas_slave_destroy,
.queuecommand = megasas_queue_command,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c60014e07b44..6c1fb8149553 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -5119,7 +5119,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
ret_target_prop = megasas_get_target_prop(instance, sdev);
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
- megasas_set_dynamic_target_properties(sdev, is_target_prop);
+ megasas_set_dynamic_target_properties(sdev, NULL,
+ is_target_prop);
}
status_reg = instance->instancet->read_fw_status_reg
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 35f81af40f51..6a19e17eb1a7 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -309,6 +309,7 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_ACK_REQUIRED (0x02)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
@@ -1315,6 +1316,8 @@ struct mpi3_driver_page0 {
__le32 reserved18;
};
#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_DEVICEEXPOSURE_DISABLE (0x00000020)
+#define MPI3_DRIVER0_BSDOPTS_WRITECACHE_DISABLE (0x00000010)
#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008)
#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 47035b811902..7df242190135 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -198,16 +198,17 @@ struct mpi3_supported_devices_data {
struct mpi3_supported_device supported_device[MPI3_SUPPORTED_DEVICE_MAX];
};
-#ifndef MPI3_ENCRYPTED_HASH_MAX
-#define MPI3_ENCRYPTED_HASH_MAX (1)
+#ifndef MPI3_PUBLIC_KEY_MAX
+#define MPI3_PUBLIC_KEY_MAX (1)
#endif
struct mpi3_encrypted_hash_entry {
u8 hash_image_type;
u8 hash_algorithm;
u8 encryption_algorithm;
u8 reserved03;
- __le32 reserved04;
- __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
+ __le16 public_key_size;
+ __le16 signature_size;
+ __le32 public_key[MPI3_PUBLIC_KEY_MAX];
};
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
@@ -228,17 +229,6 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04)
#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
-#ifndef MPI3_PUBLIC_KEY_MAX
-#define MPI3_PUBLIC_KEY_MAX (1)
-#endif
-struct mpi3_encrypted_key_with_hash_entry {
- u8 hash_image_type;
- u8 hash_algorithm;
- u8 encryption_algorithm;
- u8 reserved03;
- __le32 reserved04;
- __le32 public_key[MPI3_PUBLIC_KEY_MAX];
-};
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 0cb24fc03620..028784949873 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -27,7 +27,7 @@ struct mpi3_ioc_init_request {
__le64 sense_buffer_free_queue_address;
__le64 driver_information_address;
};
-
+#define MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED (0x08)
#define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
@@ -101,6 +101,8 @@ struct mpi3_ioc_facts_data {
__le16 max_io_throttle_group;
__le16 io_throttle_low;
__le16 io_throttle_high;
+ __le32 diag_fdl_size;
+ __le32 diag_tty_size;
};
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
@@ -108,13 +110,13 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
-#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
-#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
-#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
-#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
-#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
-#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
-#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_SUPPORTED (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_SUPPORTED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_SUPPORTED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_SUPPORTED (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED (0x00000008)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED (0x00000002)
#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
@@ -159,6 +161,8 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
+#define MPI3_IOCFACTS_DIAGFDLSIZE_NOT_SUPPORTED (0x00000000)
+#define MPI3_IOCFACTS_DIAGTTYSIZE_NOT_SUPPORTED (0x00000000)
struct mpi3_mgmt_passthrough_request {
__le16 host_tag;
u8 ioc_use_only02;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 1e0a3dcaf723..fdc3d1968e43 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (28)
+#define MPI3_VERSION_UNIT (31)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 3de1ee05c44e..f5a1529fa537 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -55,15 +55,15 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.5.1.0.0"
-#define MPI3MR_DRIVER_RELDATE "5-December-2023"
+#define MPI3MR_DRIVER_VERSION "8.8.1.0.50"
+#define MPI3MR_DRIVER_RELDATE "5-March-2024"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
#define MPI3MR_DRIVER_AUTHOR "Broadcom Inc. <mpi3mr-linuxdrv.pdl@broadcom.com>"
#define MPI3MR_DRIVER_DESC "MPI3 Storage Controller Device Driver"
-#define MPI3MR_NAME_LENGTH 32
+#define MPI3MR_NAME_LENGTH 64
#define IOCNAME "%s: "
#define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024)
@@ -294,6 +294,10 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
};
+#define MPI3MR_RESET_REASON_OSTYPE_LINUX 1
+#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
+#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
+
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
@@ -1142,7 +1146,7 @@ struct mpi3mr_ioc {
spinlock_t fwevt_lock;
struct list_head fwevt_list;
- char watchdog_work_q_name[20];
+ char watchdog_work_q_name[50];
struct workqueue_struct *watchdog_work_q;
struct delayed_work watchdog_work;
spinlock_t watchdog_lock;
@@ -1336,7 +1340,7 @@ void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc);
void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason, u8 snapdump);
+ u16 reset_reason, u8 snapdump);
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
@@ -1348,7 +1352,6 @@ void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
-void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 55d590b91947..1638109a68a0 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -1598,26 +1598,33 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
rval = -EAGAIN;
if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
goto out_unlock;
- dprint_bsg_err(mrioc,
- "%s: bsg request timedout after %d seconds\n", __func__,
- karg->timeout);
- if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
- dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+ if (((mpi_header->function != MPI3_FUNCTION_SCSI_IO) &&
+ (mpi_header->function != MPI3_FUNCTION_NVME_ENCAPSULATED))
+ || (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR)) {
+ ioc_info(mrioc, "%s: bsg request timedout after %d seconds\n",
+ __func__, karg->timeout);
+ if (!(mrioc->logging_level & MPI3_DEBUG_BSG_INFO)) {
+ dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
"bsg_mpi3_req");
if (mpi_header->function ==
- MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+ MPI3_FUNCTION_MGMT_PASSTHROUGH) {
drv_buf_iter = &drv_bufs[0];
dprint_dump(drv_buf_iter->kern_buf,
rmc_size, "mpi3_mgmt_req");
+ }
}
}
if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
- (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
+ (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) {
+ dprint_bsg_err(mrioc, "%s: bsg request timedout after %d seconds,\n"
+ "issuing target reset to (0x%04x)\n", __func__,
+ karg->timeout, mpi_header->function_dependent);
mpi3mr_issue_tm(mrioc,
MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
mpi_header->function_dependent, 0,
MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
&mrioc->host_tm_cmds, &resp_code, NULL);
+ }
if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
!(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
mpi3mr_soft_reset_handler(mrioc,
@@ -1838,6 +1845,10 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
struct device *bsg_dev = &mrioc->bsg_dev;
struct device *parent = &mrioc->shost->shost_gendev;
+ struct queue_limits lim = {
+ .max_hw_sectors = MPI3MR_MAX_APP_XFER_SECTORS,
+ .max_segments = MPI3MR_MAX_APP_XFER_SEGMENTS,
+ };
device_initialize(bsg_dev);
@@ -1853,20 +1864,14 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
return;
}
- mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
+ mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
mpi3mr_bsg_request, NULL, 0);
if (IS_ERR(mrioc->bsg_queue)) {
ioc_err(mrioc, "%s: bsg registration failed\n",
dev_name(bsg_dev));
device_del(bsg_dev);
put_device(bsg_dev);
- return;
}
-
- blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
- blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
-
- return;
}
/**
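The bsg side gets the same treatment: bsg_setup_queue() now accepts a queue_limits so the transfer limits are set at allocation time, replacing the blk_queue_max_segments()/blk_queue_max_hw_sectors() calls that used to follow setup. A sketch with placeholder values:

#include <linux/bsg-lib.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_bsg_init(struct device *dev, bsg_job_fn *request_fn)
{
	struct queue_limits lim = {
		.max_hw_sectors	= 1024,	/* placeholder */
		.max_segments	= 128,	/* placeholder */
	};
	struct request_queue *q;

	q = bsg_setup_queue(dev, dev_name(dev), &lim, request_fn, NULL, 0);
	return PTR_ERR_OR_ZERO(q);
}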
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 528f19f782f2..c2a22e96f7b7 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -11,7 +11,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
static int
-mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
+mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
struct mpi3_ioc_facts_data *facts_data);
@@ -1195,7 +1195,7 @@ static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
u32 reset_reason)
{
- u32 ioc_config, timeout, ioc_status;
+ u32 ioc_config, timeout, ioc_status, scratch_pad0;
int retval = -1;
ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
@@ -1204,7 +1204,11 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
return retval;
}
mpi3mr_clear_reset_history(mrioc);
- writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
+ MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
+ (mrioc->facts.ioc_num <<
+ MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
+ writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
@@ -1276,7 +1280,7 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
- MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
ioc_err(mrioc,
"critical error: multipath capability is enabled at the\n"
"\tcontroller while sas transport support is enabled at the\n"
@@ -1520,11 +1524,11 @@ static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
- u32 reset_reason)
+ u16 reset_reason)
{
int retval = -1;
u8 unlock_retry_count = 0;
- u32 host_diagnostic, ioc_status, ioc_config;
+ u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
@@ -1576,6 +1580,9 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
unlock_retry_count, host_diagnostic);
} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
+ scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
+ MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
+ MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
writel(host_diagnostic | reset_type,
&mrioc->sysif_regs->host_diagnostic);
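Both reset paths now tag scratchpad[0] with the OS type and IOC number alongside the reason code, using the MPI3MR_RESET_REASON_* shifts added to mpi3mr.h above. The packing, factored into a helper purely for illustration:

#include <linux/types.h>

static u32 demo_encode_reset_reason(u8 ioc_num, u16 reason)
{
	return (MPI3MR_RESET_REASON_OSTYPE_LINUX <<
		MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
	       ((u32)ioc_num << MPI3MR_RESET_REASON_IOCNUM_SHIFT) |
	       reason;
}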
@@ -2581,7 +2588,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
unsigned long flags;
enum mpi3mr_iocstate ioc_state;
u32 fault, host_diagnostic, ioc_status;
- u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
+ u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
if (mrioc->reset_in_progress)
return;
@@ -3302,6 +3309,8 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
iocinit_req.msg_flags |=
MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
+ iocinit_req.msg_flags |=
+ MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;
init_completion(&mrioc->init_cmds.done);
retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
@@ -3668,15 +3677,15 @@ static const struct {
u32 capability;
char *name;
} mpi3mr_capabilities[] = {
- { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
- { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
+ { MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
+ { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
/**
* mpi3mr_print_ioc_info - Display controller information
* @mrioc: Adapter instance reference
*
- * Display controller personalit, capability, supported
+ * Display controller personality, capability, supported
* protocols etc.
*
* Return: Nothing
@@ -3685,20 +3694,20 @@ static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
{
int i = 0, bytes_written = 0;
- char personality[16];
+ const char *personality;
char protocol[50] = {0};
char capabilities[100] = {0};
struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
switch (mrioc->facts.personality) {
case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
- strncpy(personality, "Enhanced HBA", sizeof(personality));
+ personality = "Enhanced HBA";
break;
case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
- strncpy(personality, "RAID", sizeof(personality));
+ personality = "RAID";
break;
default:
- strncpy(personality, "Unknown", sizeof(personality));
+ personality = "Unknown";
break;
}
@@ -3951,7 +3960,7 @@ retry_init:
MPI3MR_HOST_IOS_KDUMP);
if (!(mrioc->facts.ioc_capabilities &
- MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
mrioc->sas_transport_enabled = 1;
mrioc->scsi_device_channel = 1;
mrioc->shost->max_channel = 1;
@@ -4966,7 +4975,7 @@ cleanup_drv_cmd:
* Return: 0 on success, non-zero on failure.
*/
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason, u8 snapdump)
+ u16 reset_reason, u8 snapdump)
{
int retval = 0, i;
unsigned long flags;
@@ -5102,6 +5111,7 @@ out:
mrioc->device_refresh_on = 0;
mrioc->unrecoverable = 1;
mrioc->reset_in_progress = 0;
+ mrioc->stop_bsgs = 0;
retval = -1;
mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 73c831a97d27..bce639a6cca1 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -986,6 +986,25 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
return retval;
}
+static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
+ struct queue_limits *lim)
+{
+ u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;
+
+ lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
+ lim->virt_boundary_mask = (1 << pgsz) - 1;
+}
+
+static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
+ struct queue_limits *lim)
+{
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
+ (tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
+ mpi3mr_configure_nvme_dev(tgt_dev, lim);
+}
+
/**
* mpi3mr_update_sdev - Update SCSI device information
* @sdev: SCSI device reference
@@ -1001,35 +1020,21 @@ static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
struct mpi3mr_tgt_dev *tgtdev;
+ struct queue_limits lim;
tgtdev = (struct mpi3mr_tgt_dev *)data;
if (!tgtdev)
return;
mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
- switch (tgtdev->dev_type) {
- case MPI3_DEVICE_DEVFORM_PCIE:
- /*The block layer hw sector size = 512*/
- if ((tgtdev->dev_spec.pcie_inf.dev_info &
- MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
- MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgtdev->dev_spec.pcie_inf.mdts / 512);
- if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
- else
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
- }
- break;
- default:
- break;
- }
+
+ lim = queue_limits_start_update(sdev->request_queue);
+ mpi3mr_configure_tgt_dev(tgtdev, &lim);
+ WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}
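mpi3mr_update_sdev() runs against a device that is already live, so it cannot rely on a configure callback; instead it snapshots the current limits, edits them, and commits the result atomically. A sketch of the start/commit pair with a placeholder adjustment:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static void demo_refresh_limits(struct scsi_device *sdev)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(sdev->request_queue);
	lim.max_hw_sectors = 512;	/* placeholder adjustment */
	WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}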
/**
- * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
+ * mpi3mr_refresh_tgtdevs - Refresh target device exposure
* @mrioc: Adapter instance reference
*
* This is executed post controller reset to identify any
@@ -1038,8 +1043,7 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
*
* Return: Nothing.
*/
-
-void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
struct mpi3mr_stgt_priv_data *tgt_priv;
@@ -1047,8 +1051,8 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
dprint_reset(mrioc, "refresh target devices: check for removals\n");
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
- if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
- tgtdev->is_hidden &&
+ if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
+ tgtdev->is_hidden) &&
tgtdev->host_exposed && tgtdev->starget &&
tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
@@ -2010,7 +2014,7 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_refresh_sas_ports(mrioc);
mpi3mr_refresh_expanders(mrioc);
}
- mpi3mr_rfresh_tgtdevs(mrioc);
+ mpi3mr_refresh_tgtdevs(mrioc);
ioc_info(mrioc,
"scan for non responding and newly added devices after soft reset completed\n");
break;
@@ -4393,15 +4397,17 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
}
/**
- * mpi3mr_slave_configure - Slave configure callback handler
+ * mpi3mr_device_configure - Slave configure callback handler
* @sdev: SCSI device reference
+ * @lim: queue limits
*
* Configure queue depth, max hardware sectors and virt boundary
* as required
*
* Return: 0 always.
*/
-static int mpi3mr_slave_configure(struct scsi_device *sdev)
+static int mpi3mr_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct scsi_target *starget;
struct Scsi_Host *shost;
@@ -4432,28 +4438,8 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
- switch (tgt_dev->dev_type) {
- case MPI3_DEVICE_DEVFORM_PCIE:
- /*The block layer hw sector size = 512*/
- if ((tgt_dev->dev_spec.pcie_inf.dev_info &
- MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
- MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgt_dev->dev_spec.pcie_inf.mdts / 512);
- if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
- else
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
- }
- break;
- default:
- break;
- }
-
+ mpi3mr_configure_tgt_dev(tgt_dev, lim);
mpi3mr_tgtdev_put(tgt_dev);
-
return retval;
}
@@ -4895,7 +4881,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
}
- scsiio_req->flags = cpu_to_le32(scsiio_flags);
+ scsiio_req->flags |= cpu_to_le32(scsiio_flags);
if (mpi3mr_op_request_post(mrioc, op_req_q,
scmd_priv_data->mpi3mr_scsiio_req)) {
@@ -4921,7 +4907,7 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.queuecommand = mpi3mr_qcmd,
.target_alloc = mpi3mr_target_alloc,
.slave_alloc = mpi3mr_slave_alloc,
- .slave_configure = mpi3mr_slave_configure,
+ .device_configure = mpi3mr_device_configure,
.target_destroy = mpi3mr_target_destroy,
.slave_destroy = mpi3mr_slave_destroy,
.scan_finished = mpi3mr_scan_finished,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 231b4d0df85f..3b1d02ae8551 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1351,11 +1351,21 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
mr_sas_port->remote_identify.sas_address, hba_port);
+ if (mr_sas_node->num_phys > sizeof(mr_sas_port->phy_mask) * 8)
+ ioc_info(mrioc, "max port count %u could be too high\n",
+ mr_sas_node->num_phys);
+
for (i = 0; i < mr_sas_node->num_phys; i++) {
if ((mr_sas_node->phy[i].remote_identify.sas_address !=
mr_sas_port->remote_identify.sas_address) ||
(mr_sas_node->phy[i].hba_port != hba_port))
continue;
+
+ if (i > sizeof(mr_sas_port->phy_mask) * 8) {
+ ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n",
+ i, sizeof(mr_sas_port->phy_mask) * 8);
+ goto out_fail;
+ }
list_add_tail(&mr_sas_node->phy[i].port_siblings,
&mr_sas_port->phy_list);
mr_sas_port->num_phys++;
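The two checks above keep phy indices from running past the bits available in phy_mask before the index is used as a bit position; the invariant is that such an index must be strictly less than the mask width. A sketch of the bound, assuming byte-sized storage:

#include <linux/bits.h>
#include <linux/types.h>

static bool demo_phy_index_valid(u32 i, size_t mask_bytes)
{
	return i < mask_bytes * BITS_PER_BYTE;
}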
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 1b492e9a3e55..105917ea70ff 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4774,7 +4774,7 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
char desc[17] = {0};
u32 iounit_pg1_flags;
- strncpy(desc, ioc->manu_pg0.ChipName, 16);
+ strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc));
ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
desc,
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index ef8ee93005ea..89ef43a5ef86 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2497,14 +2497,15 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
}
/**
- * scsih_slave_configure - device configure routine.
+ * scsih_device_configure - device configure routine.
* @sdev: scsi device struct
+ * @lim: queue limits
*
* Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-scsih_slave_configure(struct scsi_device *sdev)
+scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -2609,8 +2610,7 @@ scsih_slave_configure(struct scsi_device *sdev)
raid_device->num_pds, ds);
if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
- blk_queue_max_hw_sectors(sdev->request_queue,
- MPT3SAS_RAID_MAX_SECTORS);
+ lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
sdev_printk(KERN_INFO, sdev,
"Set queue's max_sector to: %u\n",
MPT3SAS_RAID_MAX_SECTORS);
@@ -2675,8 +2675,7 @@ scsih_slave_configure(struct scsi_device *sdev)
pcie_device->connector_name);
if (pcie_device->nvme_mdts)
- blk_queue_max_hw_sectors(sdev->request_queue,
- pcie_device->nvme_mdts/512);
+ lim->max_hw_sectors = pcie_device->nvme_mdts / 512;
pcie_device_put(pcie_device);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
@@ -2687,8 +2686,7 @@ scsih_slave_configure(struct scsi_device *sdev)
**/
blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
sdev->request_queue);
- blk_queue_virt_boundary(sdev->request_queue,
- ioc->page_size - 1);
+ lim->virt_boundary_mask = ioc->page_size - 1;
return 0;
}
@@ -11914,7 +11912,7 @@ static const struct scsi_host_template mpt2sas_driver_template = {
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
.slave_alloc = scsih_slave_alloc,
- .slave_configure = scsih_slave_configure,
+ .device_configure = scsih_device_configure,
.target_destroy = scsih_target_destroy,
.slave_destroy = scsih_slave_destroy,
.scan_finished = scsih_scan_finished,
@@ -11952,7 +11950,7 @@ static const struct scsi_host_template mpt3sas_driver_template = {
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
.slave_alloc = scsih_slave_alloc,
- .slave_configure = scsih_slave_configure,
+ .device_configure = scsih_device_configure,
.target_destroy = scsih_target_destroy,
.slave_destroy = scsih_slave_destroy,
.scan_finished = scsih_scan_finished,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 421ea511b664..76f9a9177198 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -458,17 +458,17 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
goto out;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
- strncpy(edev->vendor_id, manufacture_reply->vendor_id,
- SAS_EXPANDER_VENDOR_ID_LEN);
- strncpy(edev->product_id, manufacture_reply->product_id,
- SAS_EXPANDER_PRODUCT_ID_LEN);
- strncpy(edev->product_rev, manufacture_reply->product_rev,
- SAS_EXPANDER_PRODUCT_REV_LEN);
+ strscpy(edev->vendor_id, manufacture_reply->vendor_id,
+ sizeof(edev->vendor_id));
+ strscpy(edev->product_id, manufacture_reply->product_id,
+ sizeof(edev->product_id));
+ strscpy(edev->product_rev, manufacture_reply->product_rev,
+ sizeof(edev->product_rev));
edev->level = manufacture_reply->sas_format & 1;
if (edev->level) {
- strncpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
- SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ strscpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ sizeof(edev->component_vendor_id));
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
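Here and in mpt3sas_base.c the strncpy() calls become strscpy(), which always NUL-terminates and reports truncation, and the sizes now come from the destination buffers rather than source-length constants. Minimal sketch:

#include <linux/printk.h>
#include <linux/string.h>

static void demo_copy_id(char *dst, size_t dst_size, const char *src)
{
	if (strscpy(dst, src, dst_size) < 0)
		pr_debug("id truncated\n");	/* illustrative handling */
}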
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 43ebb331e216..c792e4486e54 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -26,33 +26,18 @@ static const struct mvs_chip_info mvs_chips[] = {
};
static const struct attribute_group *mvst_host_groups[];
+static const struct attribute_group *mvst_sdev_groups[];
#define SOC_SAS_NUM 2
static const struct scsi_host_template mvs_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .queuecommand = sas_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
- .target_alloc = sas_target_alloc,
- .slave_configure = sas_slave_configure,
+ LIBSAS_SHT_BASE
.scan_finished = mvs_scan_finished,
.scan_start = mvs_scan_start,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
.can_queue = 1,
- .this_id = -1,
.sg_tablesize = SG_ALL,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
- .slave_alloc = sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sas_ioctl,
-#endif
.shost_groups = mvst_host_groups,
+ .sdev_groups = mvst_sdev_groups,
.track_queue_depth = 1,
};
@@ -779,6 +764,11 @@ static struct attribute *mvst_host_attrs[] = {
ATTRIBUTE_GROUPS(mvst_host);
+static const struct attribute_group *mvst_sdev_groups[] = {
+ &sas_ata_sdev_attr_group,
+ NULL
+};
+
module_init(mvs_init);
module_exit(mvs_exit);
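mvsas (and pm8001 below) now build their host templates from LIBSAS_SHT_BASE, which expands to the libsas-common scsi_host_template fields (queuecommand, EH handlers, slave hooks, ioctl and so on), leaving only driver-specific entries, while the new sdev_groups hook exposes the shared libsas/ATA sysfs attributes. A sketch with demo_* placeholders:

#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi.h>

static const struct attribute_group *demo_sdev_groups[] = {
	&sas_ata_sdev_attr_group,	/* shared SATA attributes from libsas */
	NULL
};

static const struct scsi_host_template demo_sht = {
	LIBSAS_SHT_BASE			/* macro supplies its own trailing comma */
	.can_queue	= 1,
	.sg_tablesize	= SG_ALL,
	.sdev_groups	= demo_sdev_groups,
	.track_queue_depth = 1,
};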
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7b27618fd7b2..85ff95c6543a 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -1039,3 +1039,8 @@ const struct attribute_group *pm8001_host_groups[] = {
&pm8001_host_attr_group,
NULL
};
+
+const struct attribute_group *pm8001_sdev_groups[] = {
+ &sas_ata_sdev_attr_group,
+ NULL
+};
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index ed6b7d954dda..1e63cb6cd8e3 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -110,30 +110,13 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
* The main structure which LLDD must register for scsi core.
*/
static const struct scsi_host_template pm8001_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .proc_name = DRV_NAME,
- .queuecommand = sas_queuecommand,
- .dma_need_drain = ata_scsi_dma_need_drain,
- .target_alloc = sas_target_alloc,
- .slave_configure = sas_slave_configure,
+ LIBSAS_SHT_BASE
.scan_finished = pm8001_scan_finished,
.scan_start = pm8001_scan_start,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
.can_queue = 1,
- .this_id = -1,
.sg_tablesize = PM8001_MAX_DMA_SG,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
- .slave_alloc = sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sas_ioctl,
-#endif
.shost_groups = pm8001_host_groups,
+ .sdev_groups = pm8001_sdev_groups,
.track_queue_depth = 1,
.cmd_per_lun = 32,
.map_queues = pm8001_map_queues,
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 3ccb7371902f..ced6721380a8 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -717,6 +717,7 @@ int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha);
void pm8001_free_dev(struct pm8001_device *pm8001_dev);
/* ctl shared API */
extern const struct attribute_group *pm8001_host_groups[];
+extern const struct attribute_group *pm8001_sdev_groups[];
#define PM8001_INVALID_TAG ((u32)-1)
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 0614b7e366b7..0efe2fc8b308 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -197,8 +197,9 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
}
/**
- * pmcraid_slave_configure - Configures a SCSI device
+ * pmcraid_device_configure - Configures a SCSI device
* @scsi_dev: scsi device struct
+ * @lim: queue limits
*
* This function is executed by SCSI mid layer just after a device is first
* scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
@@ -209,7 +210,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
* Return value:
* 0 on success
*/
-static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
+static int pmcraid_device_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct pmcraid_resource_entry *res = scsi_dev->hostdata;
@@ -233,8 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
scsi_dev->allow_restart = 1;
blk_queue_rq_timeout(scsi_dev->request_queue,
PMCRAID_VSET_IO_TIMEOUT);
- blk_queue_max_hw_sectors(scsi_dev->request_queue,
- PMCRAID_VSET_MAX_SECTORS);
+ lim->max_hw_sectors = PMCRAID_VSET_MAX_SECTORS;
}
/*
@@ -3668,7 +3669,7 @@ static const struct scsi_host_template pmcraid_host_template = {
.eh_host_reset_handler = pmcraid_eh_host_reset_handler,
.slave_alloc = pmcraid_slave_alloc,
- .slave_configure = pmcraid_slave_configure,
+ .device_configure = pmcraid_device_configure,
.slave_destroy = pmcraid_slave_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
.can_queue = PMCRAID_MAX_IO_CMD,
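pmcraid shows the shape of the new ->device_configure() contract this series introduces: instead of prodding the live queue with blk_queue_*() setters from ->slave_configure(), the driver edits the queue_limits the midlayer hands it, and the midlayer validates and commits the whole set afterwards (see the scsi_scan.c hunk later in this diff). A sketch of such a callback, with illustrative names and caps:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

/* Hypothetical LLDD hook: only touch @lim; no queue calls needed. */
static int example_device_configure(struct scsi_device *sdev,
				    struct queue_limits *lim)
{
	if (sdev->type == TYPE_DISK)
		lim->max_hw_sectors = 512;	/* illustrative cap */
	return 0;
}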
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index d592ee9170c1..8300f0bdddb3 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -986,12 +986,6 @@ second_pass:
return -ENODEV;
}
-static int ppa_adjust_queue(struct scsi_device *device)
-{
- blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
- return 0;
-}
-
static const struct scsi_host_template ppa_template = {
.module = THIS_MODULE,
.proc_name = "ppa",
@@ -1005,7 +999,6 @@ static const struct scsi_host_template ppa_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.can_queue = 1,
- .slave_alloc = ppa_adjust_queue,
.cmd_size = sizeof(struct scsi_pointer),
};
@@ -1111,6 +1104,7 @@ static int __ppa_attach(struct parport *pb)
host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
if (!host)
goto out1;
+ host->no_highmem = true;
host->io_port = pb->base;
host->n_io_port = ports;
host->dma_channel = -1;
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index 451fd236bfd0..96174353e389 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -170,7 +170,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
if (!count || *ppos)
return 0;
- kern_buf = memdup_user(buffer, count);
+ kern_buf = memdup_user_nul(buffer, count);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
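memdup_user_nul() matters here because the copied buffer is immediately treated as a string: it allocates count + 1 bytes and writes a terminating NUL, whereas memdup_user() returns an unterminated blob and any strstr()/sscanf() over it can read past the end. The idiom in a self-contained, hypothetical debugfs write handler:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	char *buf = memdup_user_nul(ubuf, count); /* kmalloc + copy + '\0' */

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* buf is a proper C string now; safe for sscanf()/kstrtoul(). */
	kfree(buf);
	return count;
}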
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index bf921caaf6ae..054a51713d55 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -2324,9 +2324,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
io_req->fcport = fcport;
io_req->cmd_type = QEDF_TASK_MGMT_CMD;
- /* Record which cpu this request is associated with */
- io_req->cpu = smp_processor_id();
-
/* Set TM flags */
io_req->io_req_flags = QEDF_READ;
io_req->data_xfer_len = 0;
@@ -2349,6 +2346,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
spin_lock_irqsave(&fcport->rport_lock, flags);
+ /* Record which cpu this request is associated with */
+ io_req->cpu = smp_processor_id();
+
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
memset(sqe, 0, sizeof(struct fcoe_wqe));
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index a58353b7b4e8..fd12439cbaab 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3468,7 +3468,7 @@ retry_probe:
slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
- strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
+ strscpy(slowpath_params.name, "qedf", sizeof(slowpath_params.name));
rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 8deb2001dc2f..37eed6a27816 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -120,15 +120,11 @@ static ssize_t
qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- size_t cnt = 0;
-
- if (*ppos)
- return 0;
+ char buf[64];
+ int len;
- cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover);
+ return simple_read_from_buffer(buffer, count, ppos, buf, len);
}
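Beyond being shorter, the rewrite fixes two bugs in the removed code: sprintf() was formatting straight into the __user pointer, and the hand-rolled *ppos arithmetic mishandled repeated reads. Formatting into a kernel buffer and deferring to simple_read_from_buffer() gets the offset bookkeeping, short reads, and copy_to_user() right in one call; a sketch:

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t example_read(struct file *filp, char __user *ubuf,
			    size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "value=%d\n", 42);

	/* Copies at most @count bytes from buf + *ppos, advances *ppos. */
	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}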
static int
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index a584708d3056..a8b4314bfd6e 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -7,29 +7,29 @@ config SCSI_QLA_FC
select FW_LOADER
select BTREE
help
- This qla2xxx driver supports all QLogic Fibre Channel
- PCI and PCIe host adapters.
+ This qla2xxx driver supports all QLogic Fibre Channel
+ PCI and PCIe host adapters.
- By default, firmware for the ISP parts will be loaded
- via the Firmware Loader interface.
+ By default, firmware for the ISP parts will be loaded
+ via the Firmware Loader interface.
- ISP Firmware Filename
- ---------- -----------------
- 21xx ql2100_fw.bin
- 22xx ql2200_fw.bin
- 2300, 2312, 6312 ql2300_fw.bin
- 2322, 6322 ql2322_fw.bin
- 24xx, 54xx ql2400_fw.bin
- 25xx ql2500_fw.bin
+ ISP Firmware Filename
+ ---------- -----------------
+ 21xx ql2100_fw.bin
+ 22xx ql2200_fw.bin
+ 2300, 2312, 6312 ql2300_fw.bin
+ 2322, 6322 ql2322_fw.bin
+ 24xx, 54xx ql2400_fw.bin
+ 25xx ql2500_fw.bin
- Upon request, the driver caches the firmware image until
- the driver is unloaded.
+ Upon request, the driver caches the firmware image until
+ the driver is unloaded.
- Firmware images can be retrieved from:
+ Firmware images can be retrieved from:
- http://ldriver.qlogic.com/firmware/
+ http://ldriver.qlogic.com/firmware/
- They are also included in the linux-firmware tree as well.
+ They are also included in the linux-firmware tree.
config TCM_QLA2XXX
tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs"
@@ -38,13 +38,15 @@ config TCM_QLA2XXX
select BTREE
default n
help
- Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
+ Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+
+ series target mode HBAs.
if TCM_QLA2XXX
config TCM_QLA2XXX_DEBUG
bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs"
default n
help
- Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs
- This will include code to enable the SCSI command jammer
+ Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for
+ QLogic 24xx+ series target mode HBAs.
+ This will include code to enable the SCSI command jammer.
endif
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 55ff3d7482b3..a1545dad0c0c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -274,7 +274,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
iocbs_used, ha->base_qpair->fwres.iocbs_limit);
- seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
+ seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
exch_used, ha->base_qpair->fwres.exch_limit);
if (ql2xenforce_iocb_limit == 2) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1e2f52210f60..fcb06df2ce4e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1957,9 +1957,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct req_que *req = vha->req;
- if (IS_T10_PI_CAPABLE(vha->hw))
- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
-
scsi_change_queue_depth(sdev, req->max_q_depth);
return 0;
}
@@ -3575,6 +3572,9 @@ skip_dpc:
QLA_SG_ALL : 128;
}
+ if (IS_T10_PI_CAPABLE(base_vha->hw))
+ host->dma_alignment = 0x7;
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -8156,9 +8156,6 @@ MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
static struct pci_driver qla2xxx_pci_driver = {
.name = QLA2XXX_DRIVER_NAME,
- .driver = {
- .owner = THIS_MODULE,
- },
.id_table = qla2xxx_pci_tbl,
.probe = qla2x00_probe_one,
.remove = qla2x00_remove_one,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 249f1d7021d4..75125d2021f5 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1641,6 +1641,7 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
struct ql4_chap_table *chap_table;
uint32_t chap_size = 0;
dma_addr_t chap_dma;
+ ssize_t secret_len;
chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
if (chap_table == NULL) {
@@ -1652,9 +1653,13 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
chap_table->flags |= BIT_6; /* peer */
else
chap_table->flags |= BIT_7; /* local */
- chap_table->secret_len = strlen(password);
- strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
- strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
+
+ secret_len = strscpy(chap_table->secret, password,
+ sizeof(chap_table->secret));
+ if (secret_len < MIN_CHAP_SECRET_LEN)
+ goto cleanup_chap_table;
+ chap_table->secret_len = (uint8_t)secret_len;
+ strscpy(chap_table->name, username, sizeof(chap_table->name));
chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
if (is_qla40XX(ha)) {
@@ -1679,6 +1684,8 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
memcpy((struct ql4_chap_table *)ha->chap_list + idx,
chap_table, sizeof(struct ql4_chap_table));
}
+
+cleanup_chap_table:
dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
if (rval != QLA_SUCCESS)
ret = -EINVAL;
@@ -2281,8 +2288,8 @@ int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
mbox_cmd[0] = MBOX_CMD_SET_PARAM;
if (param == SET_DRVR_VERSION) {
mbox_cmd[1] = SET_DRVR_VERSION;
- strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
- MAX_DRVR_VER_LEN - 1);
+ strscpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
+ MAX_DRVR_VER_LEN);
} else {
ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
__func__, param);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 675332e49a7b..17cccd14765f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -799,10 +799,10 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
chap_rec->chap_tbl_idx = i;
strscpy(chap_rec->username, chap_table->name,
- ISCSI_CHAP_AUTH_NAME_MAX_LEN);
- strscpy(chap_rec->password, chap_table->secret,
- QL4_CHAP_MAX_SECRET_LEN);
- chap_rec->password_length = chap_table->secret_len;
+ sizeof(chap_rec->username));
+ chap_rec->password_length = strscpy(chap_rec->password,
+ chap_table->secret,
+ sizeof(chap_rec->password));
if (chap_table->flags & BIT_7) /* local */
chap_rec->chap_type = CHAP_TYPE_OUT;
@@ -6291,8 +6291,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
tddb->tpgt = sess->tpgt;
tddb->port = conn->persistent_port;
- strscpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
- strscpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+ strscpy(tddb->iscsi_name, sess->targetname, sizeof(tddb->iscsi_name));
+ strscpy(tddb->ip_addr, conn->persistent_address, sizeof(tddb->ip_addr));
}
static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
@@ -7792,7 +7792,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
}
strscpy(flash_tddb->iscsi_name, fnode_sess->targetname,
- ISCSI_NAME_SIZE);
+ sizeof(flash_tddb->iscsi_name));
if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index f795848b316c..eb52e39f37c9 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/seq_file.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
@@ -32,38 +33,43 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
return 0;
}
-void scsi_show_rq(struct seq_file *m, struct request *rq)
+static const char *scsi_cmd_list_info(struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2;
struct Scsi_Host *shost = cmd->device->host;
+ struct scsi_cmnd *cmd2;
+
+ guard(spinlock_irq)(shost->host_lock);
+
+ list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry)
+ if (cmd == cmd2)
+ return "on eh_abort_list";
+
+ list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry)
+ if (cmd == cmd2)
+ return "on eh_cmd_q";
+
+ return NULL;
+}
+
+void scsi_show_rq(struct seq_file *m, struct request *rq)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
- const char *list_info = NULL;
char buf[80] = "(?)";
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) {
- if (cmd == cmd2) {
- list_info = "on eh_abort_list";
- goto unlock;
- }
- }
- list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) {
- if (cmd == cmd2) {
- list_info = "on eh_cmd_q";
- goto unlock;
- }
- }
-unlock:
- spin_unlock_irq(shost->host_lock);
+ if (cmd->flags & SCMD_INITIALIZED) {
+ const char *list_info = scsi_cmd_list_info(cmd);
- __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
- seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=",
- buf, cmd->retries, cmd->allowed, cmd->result,
- list_info ? : "", list_info ? ", " : "");
+ __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
+ seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x%s%s",
+ buf, cmd->retries, cmd->allowed, cmd->result,
+ list_info ? ", " : "", list_info ? : "");
+ seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago",
+ timeout_ms / 1000, timeout_ms % 1000,
+ alloc_ms / 1000, alloc_ms % 1000);
+ }
+ seq_printf(m, ", .flags=");
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
ARRAY_SIZE(scsi_cmd_flags));
- seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago",
- timeout_ms / 1000, timeout_ms % 1000,
- alloc_ms / 1000, alloc_ms % 1000);
}
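Splitting the search into scsi_cmd_list_info() is what makes guard() from <linux/cleanup.h> usable: the scope-based guard takes the host lock on entry and releases it automatically on every return, so the goto/unlock-label pattern disappears along with any chance of a new early return leaking the lock. The idiom in miniature, with a hypothetical list and lock:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static bool list_has(struct list_head *head, spinlock_t *lock,
		     struct list_head *needle)
{
	struct list_head *pos;

	guard(spinlock_irq)(lock);	/* dropped at every return below */

	list_for_each(pos, head)
		if (pos == needle)
			return true;
	return false;
}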
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index ba7237e83863..a7071e71389e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -293,14 +293,16 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
size_t from_length;
from_length = strlen(from);
- /* This zero-pads the destination */
- strncpy(to, from, to_length);
- if (from_length < to_length && !compatible) {
- /*
- * space pad the string if it is short.
- */
- memset(&to[from_length], ' ', to_length - from_length);
- }
+
+ /*
+ * null pad and null terminate if compatible
+ * otherwise space pad
+ */
+ if (compatible)
+ strscpy_pad(to, from, to_length);
+ else
+ memcpy_and_pad(to, to_length, from, from_length, ' ');
+
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
__func__, name, from);
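The two padding behaviors the old code open-coded map onto two helpers: strscpy_pad() for a NUL-terminated, NUL-padded copy, and memcpy_and_pad() for the fixed-width, space-padded, unterminated layout that SCSI INQUIRY-style fields use. Side by side, with illustrative names:

#include <linux/string.h>

/* Compatible case: NUL-terminated, remainder NUL-padded to to_len. */
static void pad_compatible(char *to, size_t to_len, const char *from)
{
	strscpy_pad(to, from, to_len);
}

/* INQUIRY case: fixed width, trailing spaces, no terminating NUL. */
static void pad_inquiry(char *to, size_t to_len, const char *from)
{
	memcpy_and_pad(to, to_len, from, strlen(from), ' ');
}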
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 967b6d62bb37..ec39acc986d6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -32,7 +32,7 @@
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
-#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
+#include <scsi/scsi_transport.h> /* scsi_init_limits() */
#include <scsi/scsi_dh.h>
#include <trace/events/scsi.h>
@@ -1963,42 +1963,36 @@ static void scsi_map_queues(struct blk_mq_tag_set *set)
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
-void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
+void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
{
struct device *dev = shost->dma_dev;
- /*
- * this limit is imposed by hardware restrictions
- */
- blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
- SG_MAX_SEGMENTS));
+ memset(lim, 0, sizeof(*lim));
+ lim->max_segments =
+ min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS);
if (scsi_host_prot_dma(shost)) {
shost->sg_prot_tablesize =
min_not_zero(shost->sg_prot_tablesize,
(unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
- blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
+ lim->max_integrity_segments = shost->sg_prot_tablesize;
}
- blk_queue_max_hw_sectors(q, shost->max_sectors);
- blk_queue_segment_boundary(q, shost->dma_boundary);
- dma_set_seg_boundary(dev, shost->dma_boundary);
+ lim->max_hw_sectors = shost->max_sectors;
+ lim->seg_boundary_mask = shost->dma_boundary;
+ lim->max_segment_size = shost->max_segment_size;
+ lim->virt_boundary_mask = shost->virt_boundary_mask;
+ lim->dma_alignment = max_t(unsigned int,
+ shost->dma_alignment, dma_get_cache_alignment() - 1);
- blk_queue_max_segment_size(q, shost->max_segment_size);
- blk_queue_virt_boundary(q, shost->virt_boundary_mask);
- dma_set_max_seg_size(dev, queue_max_segment_size(q));
+ if (shost->no_highmem)
+ lim->bounce = BLK_BOUNCE_HIGH;
- /*
- * Set a reasonable default alignment: The larger of 32-byte (dword),
- * which is a common minimum for HBAs, and the minimum DMA alignment,
- * which is set by the platform.
- *
- * Devices that require a bigger alignment can increase it later.
- */
- blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
+ dma_set_seg_boundary(dev, shost->dma_boundary);
+ dma_set_max_seg_size(dev, shost->max_segment_size);
}
-EXPORT_SYMBOL_GPL(__scsi_init_queue);
+EXPORT_SYMBOL_GPL(scsi_init_limits);
static const struct blk_mq_ops scsi_mq_ops_no_commit = {
.get_budget = scsi_mq_get_budget,
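The rename from __scsi_init_queue() to scsi_init_limits() tracks a real inversion of control: the host constraints are now gathered into a caller-provided queue_limits before any queue exists, and the queue is allocated already carrying them, instead of being allocated with defaults and mutated afterwards. The consume side, sketched (the tag set is assumed to be initialized):

#include <linux/blk-mq.h>
#include <scsi/scsi_host.h>

static struct request_queue *alloc_queue(struct blk_mq_tag_set *set,
					 struct Scsi_Host *shost)
{
	struct queue_limits lim;

	scsi_init_limits(shost, &lim);		/* zeroes and fills lim */
	return blk_mq_alloc_queue(set, &lim, NULL);
}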
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ffd7e7e72933..8300fc28cb10 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -227,7 +227,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
/*
* realloc if new shift is calculated, which is caused by setting
- * up one new default queue depth after calling ->slave_configure
+ * up one new default queue depth after calling ->device_configure
*/
if (!need_alloc && new_shift != sdev->budget_map.shift)
need_alloc = need_free = true;
@@ -283,6 +283,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
struct request_queue *q;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct queue_limits lim;
sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
GFP_KERNEL);
@@ -332,7 +333,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sg_reserved_size = INT_MAX;
- q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL);
+ scsi_init_limits(shost, &lim);
+ q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, NULL);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
@@ -343,7 +345,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
- __scsi_init_queue(sdev->host, q);
depth = sdev->host->cmd_per_lun ?: 1;
@@ -873,6 +874,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
blist_flags_t *bflags, int async)
{
+ const struct scsi_host_template *hostt = sdev->host->hostt;
+ struct queue_limits lim;
int ret;
/*
@@ -1004,19 +1007,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->select_no_atn = 1;
/*
- * Maximum 512 sector transfer length
- * broken RA4x00 Compaq Disk Array
- */
- if (*bflags & BLIST_MAX_512)
- blk_queue_max_hw_sectors(sdev->request_queue, 512);
- /*
- * Max 1024 sector transfer length for targets that report incorrect
- * max/optimal lengths and relied on the old block layer safe default
- */
- else if (*bflags & BLIST_MAX_1024)
- blk_queue_max_hw_sectors(sdev->request_queue, 1024);
-
- /*
* Some devices may not want to have a start command automatically
* issued when a device is added.
*/
@@ -1076,28 +1066,46 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
- if (sdev->host->hostt->slave_configure) {
- ret = sdev->host->hostt->slave_configure(sdev);
- if (ret) {
- /*
- * if LLDD reports slave not present, don't clutter
- * console with alloc failure messages
- */
- if (ret != -ENXIO) {
- sdev_printk(KERN_ERR, sdev,
- "failed to configure device\n");
- }
- return SCSI_SCAN_NO_RESPONSE;
- }
+ /*
+ * No need to freeze the queue as it isn't reachable by anyone else yet.
+ */
+ lim = queue_limits_start_update(sdev->request_queue);
+ if (*bflags & BLIST_MAX_512)
+ lim.max_hw_sectors = 512;
+ else if (*bflags & BLIST_MAX_1024)
+ lim.max_hw_sectors = 1024;
+ if (hostt->device_configure)
+ ret = hostt->device_configure(sdev, &lim);
+ else if (hostt->slave_configure)
+ ret = hostt->slave_configure(sdev);
+ if (ret) {
+ queue_limits_cancel_update(sdev->request_queue);
/*
- * The queue_depth is often changed in ->slave_configure.
- * Set up budget map again since memory consumption of
- * the map depends on actual queue depth.
+ * If the LLDD reports device not present, don't clutter the
+ * console with failure messages.
*/
- scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
+ if (ret != -ENXIO)
+ sdev_printk(KERN_ERR, sdev,
+ "failed to configure device\n");
+ return SCSI_SCAN_NO_RESPONSE;
}
+ ret = queue_limits_commit_update(sdev->request_queue, &lim);
+ if (ret) {
+ sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
+ return SCSI_SCAN_NO_RESPONSE;
+ }
+
+ /*
+ * The queue_depth is often changed in ->device_configure.
+ *
+ * Set up budget map again since memory consumption of the map depends
+ * on actual queue depth.
+ */
+ if (hostt->device_configure || hostt->slave_configure)
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
+
if (sdev->scsi_level >= SCSI_3)
scsi_attach_vpd(sdev);
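The BLIST caps and the configure callback now run inside the block layer's limits transaction: queue_limits_start_update() takes the queue's limits lock and returns a snapshot, the copy is edited freely, and exactly one of queue_limits_commit_update() (validate and publish) or queue_limits_cancel_update() (discard) must end the transaction. Reduced to its skeleton, with a hypothetical driver hook:

#include <linux/blkdev.h>

static int example_tweak(struct queue_limits *lim)
{
	lim->dma_alignment = 0x7;	/* e.g. a T10 PI capable HBA */
	return 0;
}

static int apply_limits(struct request_queue *q, bool cap_512)
{
	struct queue_limits lim = queue_limits_start_update(q);
	int ret;

	if (cap_512)
		lim.max_hw_sectors = 512;

	ret = example_tweak(&lim);
	if (ret) {
		queue_limits_cancel_update(q);	/* drop copy, unlock */
		return ret;
	}
	return queue_limits_commit_update(q, &lim); /* validate, publish */
}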
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 775df00021e4..b5aae4e8ae33 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1609,13 +1609,14 @@ restart:
}
EXPORT_SYMBOL(scsi_remove_target);
-int scsi_register_driver(struct device_driver *drv)
+int __scsi_register_driver(struct device_driver *drv, struct module *owner)
{
drv->bus = &scsi_bus_type;
+ drv->owner = owner;
return driver_register(drv);
}
-EXPORT_SYMBOL(scsi_register_driver);
+EXPORT_SYMBOL(__scsi_register_driver);
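__scsi_register_driver() taking an explicit owner is the header half of the .owner deletions in sd.c/ses.c/sr.c/st.c further down: presumably scsi_register_driver() becomes a macro that captures THIS_MODULE at each call site, the same trick module_driver() uses, so the owner can no longer be forgotten. The implied header side, sketched rather than quoted:

#include <linux/device/driver.h>
#include <linux/module.h>

/* Hypothetical counterpart: capture the caller's module at the
 * expansion site rather than in each driver's struct initializer. */
int __scsi_register_driver(struct device_driver *drv, struct module *owner);

#define scsi_register_driver(drv) \
	__scsi_register_driver(drv, THIS_MODULE)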
int scsi_register_interface(struct class_interface *intf)
{
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b04075f19445..7d088b8da075 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -4276,6 +4276,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
{
struct device *dev = &shost->shost_gendev;
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct queue_limits lim;
struct request_queue *q;
char bsg_name[20];
@@ -4286,16 +4287,16 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
-
- q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout,
- i->f->dd_bsg_size);
+ scsi_init_limits(shost, &lim);
+ lim.max_segments = min_not_zero(lim.max_segments, i->f->max_bsg_segments);
+ q = bsg_setup_queue(dev, bsg_name, &lim, fc_bsg_dispatch,
+ fc_bsg_job_timeout, i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev,
"fc_host%d: bsg interface failed to initialize - setup queue\n",
shost->host_no);
return PTR_ERR(q);
}
- __scsi_init_queue(shost, q);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
return 0;
@@ -4311,6 +4312,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
{
struct device *dev = &rport->dev;
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct queue_limits lim;
struct request_queue *q;
rport->rqst_q = NULL;
@@ -4318,13 +4320,14 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
+ scsi_init_limits(shost, &lim);
+ lim.max_segments = min_not_zero(lim.max_segments, i->f->max_bsg_segments);
+ q = bsg_setup_queue(dev, dev_name(dev), &lim, fc_bsg_dispatch_prep,
fc_bsg_job_timeout, i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev, "failed to setup bsg queue\n");
return PTR_ERR(q);
}
- __scsi_init_queue(shost, q);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
rport->rqst_q = q;
return 0;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index af3ac6346796..93e1978ad564 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1535,6 +1535,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
{
struct device *dev = &shost->shost_gendev;
struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
+ struct queue_limits lim;
struct request_queue *q;
char bsg_name[20];
@@ -1542,13 +1543,14 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
return -ENOTSUPP;
snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
- q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0);
+ scsi_init_limits(shost, &lim);
+ q = bsg_setup_queue(dev, bsg_name, &lim, iscsi_bsg_host_dispatch, NULL,
+ 0);
if (IS_ERR(q)) {
shost_printk(KERN_ERR, shost, "bsg interface failed to "
"initialize - no request queue\n");
return PTR_ERR(q);
}
- __scsi_init_queue(shost, q);
ihost->bsg_q = q;
return 0;
@@ -1603,7 +1605,6 @@ static DEFINE_MUTEX(rx_queue_mutex);
static LIST_HEAD(sesslist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
-static LIST_HEAD(connlist_err);
static DEFINE_SPINLOCK(connlock);
static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn)
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index d704c484a251..424a89513814 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -197,7 +197,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
}
if (rphy) {
- q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev),
+ q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), NULL,
sas_smp_dispatch, NULL, 0);
if (IS_ERR(q))
return PTR_ERR(q);
@@ -206,7 +206,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
char name[20];
snprintf(name, sizeof(name), "sas_host%d", shost->host_no);
- q = bsg_setup_queue(&shost->shost_gendev, name,
+ q = bsg_setup_queue(&shost->shost_gendev, name, NULL,
sas_smp_dispatch, NULL, 0);
if (IS_ERR(q))
return PTR_ERR(q);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 64c5129044b3..332eb9dac22d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -4193,7 +4193,6 @@ static const struct dev_pm_ops sd_pm_ops = {
static struct scsi_driver sd_template = {
.gendrv = {
.name = "sd",
- .owner = THIS_MODULE,
.probe = sd_probe,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.remove = sd_remove,
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0f2c87cc95e6..e22c7f5e652b 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -908,7 +908,6 @@ static struct class_interface ses_interface = {
static struct scsi_driver ses_template = {
.gendrv = {
.name = "ses",
- .owner = THIS_MODULE,
.probe = ses_probe,
.remove = ses_remove,
},
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 385180c98be4..bb15e0ac8fe4 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1041,9 +1041,8 @@ static int pqi_write_driver_version_to_host_wellness(
buffer->driver_version_tag[1] = 'V';
put_unaligned_le16(sizeof(buffer->driver_version),
&buffer->driver_version_length);
- strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
- sizeof(buffer->driver_version) - 1);
- buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+ strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
+ sizeof(buffer->driver_version));
buffer->dont_write_tag[0] = 'D';
buffer->dont_write_tag[1] = 'W';
buffer->end_tag[0] = 'Z';
diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c
index 3ddbdbc3ded1..48bf82d042b4 100644
--- a/drivers/scsi/snic/snic_attrs.c
+++ b/drivers/scsi/snic/snic_attrs.c
@@ -13,7 +13,7 @@ snic_show_sym_name(struct device *dev,
{
struct snic *snic = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%s\n", snic->name);
+ return sysfs_emit(buf, "%s\n", snic->name);
}
static ssize_t
@@ -23,8 +23,7 @@ snic_show_state(struct device *dev,
{
struct snic *snic = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%s\n",
- snic_state_str[snic_get_state(snic)]);
+ return sysfs_emit(buf, "%s\n", snic_state_str[snic_get_state(snic)]);
}
static ssize_t
@@ -32,7 +31,7 @@ snic_show_drv_version(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION);
+ return sysfs_emit(buf, "%s\n", SNIC_DRV_VERSION);
}
static ssize_t
@@ -45,8 +44,8 @@ snic_show_link_state(struct device *dev,
if (snic->config.xpt_type == SNIC_DAS)
snic->link_status = svnic_dev_link_status(snic->vdev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (snic->link_status) ? "Link Up" : "Link Down");
+ return sysfs_emit(buf, "%s\n",
+ (snic->link_status) ? "Link Up" : "Link Down");
}
static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL);
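The snic conversions are the standard sysfs_emit() cleanup: show() callbacks receive a full page, and sysfs_emit() both encodes that assumption (it warns if buf is not page-aligned) and caps output at PAGE_SIZE, so callers stop passing PAGE_SIZE by hand. A converted attribute, sketched with an illustrative value:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* buf is the PAGE_SIZE sysfs page; sysfs_emit() enforces that. */
	return sysfs_emit(buf, "%s\n", "1.0.0");
}
static DEVICE_ATTR_RO(version);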
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 268b3a40891e..7ab000942b97 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -95,7 +95,6 @@ static const struct dev_pm_ops sr_pm_ops = {
static struct scsi_driver sr_template = {
.gendrv = {
.name = "sr",
- .owner = THIS_MODULE,
.probe = sr_probe,
.remove = sr_remove,
.pm = &sr_pm_ops,
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5a9bcf8e0792..0d8ce1a92168 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -206,7 +206,6 @@ static int st_remove(struct device *);
static struct scsi_driver st_template = {
.gendrv = {
.name = "st",
- .owner = THIS_MODULE,
.probe = st_probe,
.remove = st_remove,
.groups = st_drv_groups,
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index e4fafc77bd20..a44b60c9004a 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1721,9 +1721,7 @@ wd33c93_setup(char *str)
p1 = setup_buffer;
*p1 = '\0';
if (str)
- strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer));
- setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
- p1 = setup_buffer;
+ strscpy(p1, str, SETUP_BUFFER_SIZE);
i = 0;
while (*p1 && (i < MAX_SETUP_ARGS)) {
p2 = strchr(p1, ',');
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 86d32e3b3282..c4f54c311d05 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -70,18 +70,6 @@ static int slave_alloc(struct scsi_device *sdev)
static int slave_configure(struct scsi_device *sdev)
{
- /*
- * Scatter-gather buffers (all but the last) must have a length
- * divisible by the bulk maxpacket size. Otherwise a data packet
- * would end up being short, causing a premature end to the data
- * transfer. Since high-speed bulk pipes have a maxpacket size
- * of 512, we'll use that as the scsi device queue's DMA alignment
- * mask. Guaranteeing proper alignment of the first buffer will
- * have the desired effect because, except at the beginning and
- * the end, scatter-gather buffers follow page boundaries.
- */
- blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
-
/* Set the SCSI level to at least 2. We'll leave it at 3 if that's
* what is originally reported. We need this to avoid confusing
* the SCSI layer with devices that report 0 or 1, but need 10-byte
@@ -219,6 +207,18 @@ static const struct scsi_host_template rtsx_host_template = {
/* limit the total size of a transfer to 120 KB */
.max_sectors = 240,
+ /*
+ * Scatter-gather buffers (all but the last) must have a length
+ * divisible by the bulk maxpacket size. Otherwise a data packet
+ * would end up being short, causing a premature end to the data
+ * transfer. Since high-speed bulk pipes have a maxpacket size
+ * of 512, we'll use that as the scsi device queue's DMA alignment
+ * mask. Guaranteeing proper alignment of the first buffer will
+ * have the desired effect because, except at the beginning and
+ * the end, scatter-gather buffers follow page boundaries.
+ */
+ .dma_alignment = 511,
+
/* emulated HBA */
.emulated = 1,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7a85e6477e46..bf4892544cfd 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -37,7 +37,6 @@
#include "target_core_ua.h"
static DEFINE_MUTEX(device_mutex);
-static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);
static struct se_hba *lun0_hba;
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 768bf87cd80d..005d63ab1f44 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -601,8 +601,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
while (sq_head_slot != hwq->sq_tail_slot) {
- utrd = hwq->sqe_base_addr +
- sq_head_slot * sizeof(struct utp_transfer_req_desc);
+ utrd = hwq->sqe_base_addr + sq_head_slot;
match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
if (addr == match) {
ufshcd_mcq_nullify_sqe(utrd);
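Dropping the explicit sizeof() multiply is only correct because sqe_base_addr is (via a change outside this excerpt) a struct utp_transfer_req_desc * rather than a void *: C scales pointer arithmetic by the pointee size, so base + slot already lands on the right descriptor, and keeping the multiply would overshoot. In miniature, with a stand-in descriptor type:

#include <linux/types.h>

struct desc { u8 bytes[32]; };		/* stand-in descriptor type */

static struct desc *slot_ptr(struct desc *base, size_t slot)
{
	/* Equivalent to (void *)base + slot * sizeof(struct desc). */
	return base + slot;
}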
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 374e5aae4e7e..433d0480391e 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -253,7 +253,8 @@ int ufs_bsg_probe(struct ufs_hba *hba)
if (ret)
goto out;
- q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, ufs_bsg_request,
+ NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out;
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a0f8e930167d..0cf07194bbe8 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -748,8 +748,6 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == ufshci_version(1, 0))
- return INTERRUPT_MASK_ALL_VER_10;
if (hba->ufs_version <= ufshci_version(2, 0))
return INTERRUPT_MASK_ALL_VER_11;
@@ -990,30 +988,6 @@ bool ufshcd_is_hba_active(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
-{
- /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
- if (hba->ufs_version <= ufshci_version(1, 1))
- return UFS_UNIPRO_VER_1_41;
- else
- return UFS_UNIPRO_VER_1_6;
-}
-EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
-
-static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
-{
- /*
- * If both host and device support UniPro ver1.6 or later, PA layer
- * parameters tuning happens during link startup itself.
- *
- * We can manually tune PA layer parameters if either host or device
- * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
- * logic simple, we will only do manual tuning if local unipro version
- * doesn't support ver1.6 or later.
- */
- return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
-}
-
/**
* ufshcd_pm_qos_init - initialize PM QoS request
* @hba: per adapter instance
@@ -2674,14 +2648,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = set & INTERRUPT_MASK_RW_VER_10;
- set = rw | ((set ^ intrs) & intrs);
- } else {
- set |= intrs;
- }
-
+ set |= intrs;
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
@@ -2694,34 +2661,30 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = (set & INTERRUPT_MASK_RW_VER_10) &
- ~(intrs & INTERRUPT_MASK_RW_VER_10);
- set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
-
- } else {
- set &= ~intrs;
- }
-
+ set &= ~intrs;
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
* ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
* header according to the request
+ * @hba: per adapter instance
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: requests data direction
* @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
- enum dma_data_direction cmd_dir, int ehs_length)
+static void
+ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ u8 *upiu_flags, enum dma_data_direction cmd_dir,
+ int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
struct request_desc_header *h = &req_desc->header;
enum utp_data_direction data_direction;
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
*h = (typeof(*h)){ };
if (cmd_dir == DMA_FROM_DEVICE) {
@@ -2854,12 +2817,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
u8 upiu_flags;
int ret = 0;
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@ -2882,13 +2841,7 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_SCSI;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
if (ioprio_class == IOPRIO_CLASS_RT)
upiu_flags |= UPIU_CMD_FLAGS_CP;
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
@@ -3061,15 +3014,21 @@ out:
return err;
}
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ enum dev_cmd_type cmd_type, u8 lun, int tag)
{
lrbp->cmd = NULL;
lrbp->task_tag = tag;
- lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->lun = lun;
lrbp->intr_cmd = true; /* No interrupt aggregation */
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
+}
+
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
return ufshcd_compose_devman_upiu(hba, lrbp);
}
@@ -3082,16 +3041,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
*/
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
- struct request *rq;
-
- if (!cmd)
- return false;
-
- rq = scsi_cmd_to_rq(cmd);
- if (!blk_mq_request_started(rq))
- return false;
-
- return true;
+ return cmd && blk_mq_rq_state(scsi_cmd_to_rq(cmd)) == MQ_RQ_IN_FLIGHT;
}
/*
@@ -3276,6 +3226,39 @@ retry:
return err;
}
+static void ufshcd_dev_man_lock(struct ufs_hba *hba)
+{
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
+{
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+}
+
+static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ const u32 tag, int timeout)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int err;
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+
+ return err;
+}
+
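ufshcd_dev_man_lock()/ufshcd_dev_man_unlock() name the three-step acquire (controller hold, dev_cmd mutex, clk-scaling read lock) that half a dozen call sites below used to open-code, and ufshcd_issue_dev_cmd() does the same for the send/trace/wait sequence. Centralizing a compound acquire keeps the lock ordering in exactly one place; the shape of the idiom, generically:

#include <linux/mutex.h>
#include <linux/rwsem.h>

struct mgr {
	struct mutex cmd_lock;
	struct rw_semaphore scaling_lock;
};

/* Acquire in one fixed order... */
static void mgr_lock(struct mgr *m)
{
	mutex_lock(&m->cmd_lock);
	down_read(&m->scaling_lock);
}

/* ...and release strictly in reverse. */
static void mgr_unlock(struct mgr *m)
{
	up_read(&m->scaling_lock);
	mutex_unlock(&m->cmd_lock);
}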
/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
@@ -3290,34 +3273,18 @@ retry:
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
-
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
- goto out;
-
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ return err;
-out:
- up_read(&hba->clk_scaling_lock);
- return err;
+ return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
}
/**
@@ -3387,8 +3354,8 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3430,8 +3397,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -3461,9 +3427,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
return -EINVAL;
}
- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);
- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3493,8 +3458,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
*attr_val = be32_to_cpu(response->upiu_res.value);
out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -3557,9 +3521,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
return -EINVAL;
}
- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);
- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
hba->dev_cmd.query.descriptor = desc_buf;
@@ -3592,8 +3555,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -4289,7 +4251,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
* Make sure UIC command completion interrupt is disabled before
* issuing UIC command.
*/
- wmb();
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
reenable_intr = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4772,12 +4734,6 @@ int ufshcd_make_hba_operational(struct ufs_hba *hba)
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
- * Make sure base address and interrupt setup are updated before
- * enabling the run/stop registers below.
- */
- wmb();
-
- /*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
@@ -5074,8 +5030,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
hba->nop_out_timeout);
@@ -5085,8 +5041,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+
+ ufshcd_dev_man_unlock(hba);
if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -5264,9 +5220,6 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
*/
sdev->silence_suspend = 1;
- if (hba->vops && hba->vops->config_scsi_dev)
- hba->vops->config_scsi_dev(sdev);
-
ufshcd_crypto_register(hba, q);
return 0;
@@ -5549,15 +5502,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- if (cqe) {
- ocs = le32_to_cpu(cqe->status) & MASK_OCS;
- lrbp->utr_descriptor_ptr->header.ocs = ocs;
- }
- complete(hba->dev_cmd.complete);
+ } else if (hba->dev_cmd.complete) {
+ if (cqe) {
+ ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+ lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
+ complete(hba->dev_cmd.complete);
}
}
@@ -7092,10 +7042,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
/* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
-
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
- /* Make sure that doorbell is committed immediately */
- wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -7203,35 +7150,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
u8 upiu_flags;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
-
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0;
- lrbp->intr_cmd = true;
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = cmd_type;
+ ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
/* update the task tag in the request upiu */
req_upiu->header.task_tag = tag;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
-
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
@@ -7245,17 +7178,12 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* read the response directly ignoring all errors.
*/
- ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7278,7 +7206,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
- up_read(&hba->clk_scaling_lock);
return err;
}
@@ -7317,13 +7244,11 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
cmd_type, desc_op);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
break;
case UPIU_TRANSACTION_TASK_REQ:
@@ -7373,41 +7298,21 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
enum dma_data_direction dir)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
int result;
u8 upiu_flags;
u8 *ehs_data;
u16 ehs_len;
+ int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
/* Protects use of hba->reserved_slot. */
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
+ ufshcd_dev_man_lock(hba);
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = UFS_UPIU_RPMB_WLUN;
-
- lrbp->intr_cmd = true;
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
-
- /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
- /*
- * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes
- * EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. if it is 1,
- * HW controller takes EHS length from UTRD.
- */
- if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
- else
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
/* update the task tag */
req_upiu->header.task_tag = tag;
@@ -7422,11 +7327,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- hba->dev_cmd.complete = &wait;
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
-
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
+ err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);
if (!err) {
/* Just copy the upiu response as it is */
@@ -7451,9 +7352,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
}
}
- up_read(&hba->clk_scaling_lock);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
+
return err ? : result;
}
@@ -8400,83 +8300,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
}
/**
- * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
- * @hba: per-adapter instance
- *
- * PA_TActivate parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
- * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
- * the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(
- RX_MIN_ACTIVATETIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_min_activatetime);
- if (ret)
- goto out;
-
- /* make sure proper unit conversion is applied */
- tuned_pa_tactivate =
- ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
- / PA_TACTIVATE_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- tuned_pa_tactivate);
-
-out:
- return ret;
-}
-
-/**
- * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
- * @hba: per-adapter instance
- *
- * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
- * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
- * This optimal value can help reduce the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
- u32 max_hibern8_time, tuned_pa_hibern8time;
-
- ret = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &local_tx_hibern8_time_cap);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_hibern8_time_cap);
- if (ret)
- goto out;
-
- max_hibern8_time = max(local_tx_hibern8_time_cap,
- peer_rx_hibern8_time_cap);
- /* make sure proper unit conversion is applied */
- tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
- / PA_HIBERN8_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
- tuned_pa_hibern8time);
-out:
- return ret;
-}
-
-/**
* ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
* less than device PA_TACTIVATE time.
* @hba: per-adapter instance
@@ -8548,11 +8371,6 @@ out:
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
- if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
- ufshcd_tune_pa_tactivate(hba);
- ufshcd_tune_pa_hibern8time(hba);
- }
-
ufshcd_vops_apply_dev_quirks(hba);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
@@ -8716,9 +8534,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
if (dev_info->wspecversion < 0x400)
return;
- ufshcd_hold(hba);
-
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
ufshcd_init_query(hba, &request, &response,
UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -8736,8 +8552,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
__func__, err);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
}
/**
@@ -10400,7 +10215,7 @@ int ufshcd_system_restore(struct device *dev)
* are updated with the latest queue addresses. Only after
* updating these addresses, we can queue the new commands.
*/
- mb();
+ ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
/* Resuming from hibernate, assume that link was OFF */
ufshcd_set_link_off(hba);
@@ -10621,7 +10436,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
* Make sure that UFS interrupts are disabled and any pending interrupt
* status is cleared before registering UFS interrupt handler.
*/
- mb();
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
@@ -10901,7 +10716,6 @@ static void ufshcd_check_header_layout(void)
static struct scsi_driver ufs_dev_wlun_template = {
.gendrv = {
.name = "ufs_device_wlun",
- .owner = THIS_MODULE,
.probe = ufshcd_wl_probe,
.remove = ufshcd_wl_remove,
.pm = &ufshcd_wl_pm_ops,
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index bb30267da471..66811d8d1929 100644
--- a/drivers/ufs/host/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -136,7 +136,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
* Make sure the register was updated,
* UniPro layer will not work with an incorrect value.
*/
- mb();
+ ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);
return 0;
}
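The mb() conversions in this series all follow one pattern: instead of a full memory barrier, the MMIO write is flushed by reading a register on the same device back, since that read cannot complete until the interconnect has delivered the posted write. A minimal sketch of the pattern, assuming ufshcd_writel()/ufshcd_readl() are the driver's usual MMIO accessors and hclkdiv holds the computed divider:

	/* Post the new divider value ... */
	ufshcd_writel(hba, hclkdiv, CDNS_UFS_REG_HCLKDIV);

	/*
	 * ... then flush it with a read back. A CPU-side mb() orders
	 * accesses but does not guarantee the posted write has reached
	 * the device; the read back does.
	 */
	ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);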
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 734d40f99e31..88d125d1ee3c 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -50,6 +50,8 @@
#define HCI_ERR_EN_N_LAYER 0x80
#define HCI_ERR_EN_T_LAYER 0x84
#define HCI_ERR_EN_DME_LAYER 0x88
+#define HCI_V2P1_CTRL 0x8C
+#define IA_TICK_SEL BIT(16)
#define HCI_CLKSTOP_CTRL 0xB0
#define REFCLKOUT_STOP BIT(4)
#define MPHY_APBCLK_STOP BIT(3)
@@ -59,6 +61,7 @@
#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\
UNIPRO_PCLK_STOP)
+/* HCI_MISC is also known as HCI_FORCE_HCS */
#define HCI_MISC 0xB4
#define REFCLK_CTRL_EN BIT(7)
#define UNIPRO_PCLK_CTRL_EN BIT(6)
@@ -136,6 +139,9 @@ enum {
/*
* UNIPRO registers
*/
+#define UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER0 0x7888
+#define UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER1 0x788c
+#define UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER2 0x7890
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0
@@ -306,8 +312,9 @@ static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ u32 val = attr->pa_dbg_opt_suite1_val;
struct ufs_hba *hba = ufs->hba;
- u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
int i;
exynos_ufs_enable_ov_tm(hba);
@@ -324,12 +331,13 @@ static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
udelay(1);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
+ ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off),
+ val | (1 << 12));
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
udelay(1600);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off), val);
return 0;
}
@@ -921,14 +929,23 @@ out_exit_phy:
static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
{
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
struct ufs_hba *hba = ufs->hba;
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
- DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ if (attr->pa_dbg_clk_period_off)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_clk_period_off),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
ufs->drv_data->uic_attr->tx_trailingclks);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
- ufs->drv_data->uic_attr->pa_dbg_option_suite);
+
+ if (attr->pa_dbg_opt_suite1_off)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off),
+ attr->pa_dbg_opt_suite1_val);
+
+ if (attr->pa_dbg_opt_suite2_off)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite2_off),
+ attr->pa_dbg_opt_suite2_val);
}
static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
@@ -1005,6 +1022,13 @@ static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
{
u32 val;
+ /* Select function clock (mclk) for timer tick */
+ if (ufs->opts & EXYNOS_UFS_OPT_TIMER_TICK_SELECT) {
+ val = hci_readl(ufs, HCI_V2P1_CTRL);
+ val |= IA_TICK_SEL;
+ hci_writel(ufs, val, HCI_V2P1_CTRL);
+ }
+
val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
}
@@ -1186,7 +1210,10 @@ static int exynos_ufs_init(struct ufs_hba *hba)
if (ret)
goto out;
exynos_ufs_specify_phy_time_attr(ufs);
- exynos_ufs_config_smu(ufs);
+ if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE))
+ exynos_ufs_config_smu(ufs);
+
+ hba->host->dma_alignment = SZ_4K - 1;
return 0;
out:
@@ -1475,10 +1502,11 @@ static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
{
- int i;
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
struct ufs_hba *hba = ufs->hba;
+ int i;
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
+ ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_clk_period_off),
DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
@@ -1502,7 +1530,9 @@ static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off),
+ 0x2e820183);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
exynos_ufs_establish_connt(ufs);
@@ -1510,11 +1540,6 @@ static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
return 0;
}
-static void exynos_ufs_config_scsi_dev(struct scsi_device *sdev)
-{
- blk_queue_update_dma_alignment(sdev->request_queue, SZ_4K - 1);
-}
-
static int fsd_ufs_post_link(struct exynos_ufs *ufs)
{
int i;
@@ -1571,6 +1596,96 @@ static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
return 0;
}
+static inline u32 get_mclk_period_unipro_18(struct exynos_ufs *ufs)
+{
+ return (16 * 1000 * 1000000UL / ufs->mclk_rate);
+}
+
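The helper above appears to express the mclk period in sixteenths of a nanosecond: 16 * 1000 * 1000000UL is 16e9, so dividing by the rate in Hz yields 16 * period_ns. A worked example, assuming an illustrative mclk_rate of 166 MHz:

	/*
	 * period_ns = 10^9 / 166000000         ~= 6.02 ns
	 * 16 * 1000 * 1000000UL / 166000000     = 96 (~= 16 * 6.02)
	 */
	unipro_writel(ufs, get_mclk_period_unipro_18(ufs), COMP_CLK_PERIOD);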
+static int gs101_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ int i;
+ u32 tx_line_reset_period, rx_line_reset_period;
+
+ rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate)
+ / NSEC_PER_MSEC;
+ tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate)
+ / NSEC_PER_MSEC;
+
+ unipro_writel(ufs, get_mclk_period_unipro_18(ufs), COMP_CLK_PERIOD);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
+ (rx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
+ (rx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
+ (rx_line_reset_period) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x69);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
+ }
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
+ 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
+ (tx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
+ (tx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
+ (tx_line_reset_period) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x7F, i), 0);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xA006), 0x8000);
+
+ return 0;
+}
+
+static int gs101_ufs_post_link(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ exynos_ufs_enable_dbg_mode(hba);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0x3e8);
+ exynos_ufs_disable_dbg_mode(hba);
+
+ return 0;
+}
+
+static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+ unipro_writel(ufs, 8064, UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER0);
+ unipro_writel(ufs, 28224, UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER1);
+ unipro_writel(ufs, 20160, UNIPRO_DME_POWERMODE_REQ_LOCALL2TIMER2);
+ unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
+ unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
+ unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);
+
+ return 0;
+}
+
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
@@ -1583,7 +1698,6 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.hibern8_notify = exynos_ufs_hibern8_notify,
.suspend = exynos_ufs_suspend,
.resume = exynos_ufs_resume,
- .config_scsi_dev = exynos_ufs_config_scsi_dev,
};
static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
@@ -1644,7 +1758,9 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
.rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
- .pa_dbg_option_suite = 0x30103,
+ .pa_dbg_clk_period_off = PA_DBG_CLK_PERIOD,
+ .pa_dbg_opt_suite1_val = 0x30103,
+ .pa_dbg_opt_suite1_off = PA_DBG_OPTION_SUITE,
};
static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
@@ -1696,6 +1812,34 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
.post_pwr_change = exynos7_ufs_post_pwr_change,
};
+static struct exynos_ufs_uic_attr gs101_uic_attr = {
+ .tx_trailingclks = 0xff,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_opt_suite1_val = 0x90913C1C,
+ .pa_dbg_opt_suite1_off = PA_GS101_DBG_OPTION_SUITE1,
+ .pa_dbg_opt_suite2_val = 0xE01C115F,
+ .pa_dbg_opt_suite2_off = PA_GS101_DBG_OPTION_SUITE2,
+};
+
static struct exynos_ufs_uic_attr fsd_uic_attr = {
.tx_trailingclks = 0x10,
.tx_dif_p_nsec = 3000000, /* unit: ns */
@@ -1718,7 +1862,9 @@ static struct exynos_ufs_uic_attr fsd_uic_attr = {
.rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
- .pa_dbg_option_suite = 0x2E820183,
+ .pa_dbg_clk_period_off = PA_DBG_CLK_PERIOD,
+ .pa_dbg_opt_suite1_val = 0x2E820183,
+ .pa_dbg_opt_suite1_off = PA_DBG_OPTION_SUITE,
};
static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
@@ -1737,7 +1883,27 @@ static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.pre_pwr_change = fsd_ufs_pre_pwr_change,
};
+static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
+ .uic_attr = &gs101_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+ .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_UFSPR_SECURE |
+ EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
+ .drv_init = exynosauto_ufs_drv_init,
+ .pre_link = gs101_ufs_pre_link,
+ .post_link = gs101_ufs_post_link,
+ .pre_pwr_change = gs101_ufs_pre_pwr_change,
+};
+
static const struct of_device_id exynos_ufs_of_match[] = {
+ { .compatible = "google,gs101-ufs",
+ .data = &gs101_ufs_drvs },
{ .compatible = "samsung,exynos7-ufs",
.data = &exynos_ufs_drvs },
{ .compatible = "samsung,exynosautov9-ufs",
@@ -1748,6 +1914,7 @@ static const struct of_device_id exynos_ufs_of_match[] = {
.data = &fsd_ufs_drvs },
{},
};
+MODULE_DEVICE_TABLE(of, exynos_ufs_of_match);
static const struct dev_pm_ops exynos_ufs_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
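The MODULE_DEVICE_TABLE(of, ...) added above exports the compatible strings as module aliases so the driver can be autoloaded when a matching node such as the new google,gs101-ufs appears; the matched entry's .data pointer is what carries the per-SoC drv_data. A hedged sketch of the usual lookup, with a hypothetical probe function name:

	/* Sketch: how the matched per-SoC data is typically retrieved. */
	static int exynos_ufs_probe_sketch(struct platform_device *pdev)
	{
		const struct exynos_ufs_drv_data *drv_data =
			device_get_match_data(&pdev->dev); /* e.g. &gs101_ufs_drvs */

		if (!drv_data)
			return -ENODEV;
		/* ... */
		return 0;
	}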
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index a4bd6646d7f1..1646c4a9bb08 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -10,6 +10,12 @@
#define _UFS_EXYNOS_H_
/*
+ * Component registers
+ */
+
+#define COMP_CLK_PERIOD 0x44
+
+/*
* UNIPRO registers
*/
#define UNIPRO_DBG_FORCE_DME_CTRL_STATE 0x150
@@ -30,6 +36,14 @@
#define PA_DBG_OPTION_SUITE_DYN 0x9565
/*
+ * Note: GS101_DBG_OPTION offsets below differ from the TRM
+ * but match the downstream driver. Following the TRM
+ * results in non-functioning UFS.
+ */
+#define PA_GS101_DBG_OPTION_SUITE1 0x956a
+#define PA_GS101_DBG_OPTION_SUITE2 0x956d
+
+/*
* MIBs for Transport Layer debug registers
*/
#define T_DBG_SKIP_INIT_HIBERN8_EXIT 0xc001
@@ -116,7 +130,7 @@ struct exynos_ufs;
#define PA_HIBERN8TIME_VAL 0x20
#define PCLK_AVAIL_MIN 70000000
-#define PCLK_AVAIL_MAX 167000000
+#define PCLK_AVAIL_MAX 267000000
struct exynos_ufs_uic_attr {
/* TX Attributes */
@@ -145,7 +159,11 @@ struct exynos_ufs_uic_attr {
/* Common Attributes */
unsigned int cmn_pwm_clk_ctrl;
/* Internal Attributes */
- unsigned int pa_dbg_option_suite;
+ unsigned int pa_dbg_clk_period_off;
+ unsigned int pa_dbg_opt_suite1_val;
+ unsigned int pa_dbg_opt_suite1_off;
+ unsigned int pa_dbg_opt_suite2_val;
+ unsigned int pa_dbg_opt_suite2_off;
/* Changeable Attributes */
unsigned int rx_adv_fine_gran_sup_en;
unsigned int rx_adv_fine_gran_step;
@@ -221,6 +239,8 @@ struct exynos_ufs {
#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3)
#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4)
#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5)
+#define EXYNOS_UFS_OPT_UFSPR_SECURE BIT(6)
+#define EXYNOS_UFS_OPT_TIMER_TICK_SELECT BIT(7)
};
#define for_each_ufs_rx_lane(ufs, i) \
diff --git a/drivers/ufs/host/ufs-mediatek-sip.h b/drivers/ufs/host/ufs-mediatek-sip.h
new file mode 100644
index 000000000000..7d17aedf6fb8
--- /dev/null
+++ b/drivers/ufs/host/ufs-mediatek-sip.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#ifndef _UFS_MEDIATEK_SIP_H
+#define _UFS_MEDIATEK_SIP_H
+
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/*
+ * SiP (Silicon Partner) commands
+ */
+#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0)
+#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
+#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
+#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+#define UFS_MTK_SIP_SRAM_PWR_CTRL BIT(5)
+#define UFS_MTK_SIP_GET_VCC_NUM BIT(6)
+#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7)
+#define UFS_MTK_SIP_MPHY_CTRL BIT(8)
+#define UFS_MTK_SIP_MTCMOS_CTRL BIT(9)
+
+/*
+ * Multi-VCC by Numbering
+ */
+enum ufs_mtk_vcc_num {
+ UFS_VCC_NONE = 0,
+ UFS_VCC_1,
+ UFS_VCC_2,
+ UFS_VCC_MAX
+};
+
+enum ufs_mtk_mphy_op {
+ UFS_MPHY_BACKUP = 0,
+ UFS_MPHY_RESTORE
+};
+
+/*
+ * SMC call wrapper function
+ */
+struct ufs_mtk_smc_arg {
+ unsigned long cmd;
+ struct arm_smccc_res *res;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+ unsigned long v4;
+ unsigned long v5;
+ unsigned long v6;
+ unsigned long v7;
+};
+
+
+static inline void _ufs_mtk_smc(struct ufs_mtk_smc_arg s)
+{
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL,
+ s.cmd,
+ s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res);
+}
+
+#define ufs_mtk_smc(...) \
+ _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__})
+
+/* SiP kernel interface */
+#define ufs_mtk_va09_pwr_ctrl(res, on) \
+ ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on)
+
+#define ufs_mtk_crypto_ctrl(res, enable) \
+ ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable)
+
+#define ufs_mtk_ref_clk_notify(on, stage, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high)
+
+#define ufs_mtk_sram_pwr_ctrl(on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_SRAM_PWR_CTRL, &(res), on)
+
+#define ufs_mtk_get_vcc_num(res) \
+ ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res))
+
+#define ufs_mtk_device_pwr_ctrl(on, ufs_version, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_version)
+
+#define ufs_mtk_mphy_ctrl(op, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_MPHY_CTRL, &(res), op)
+
+#define ufs_mtk_mtcmos_ctrl(op, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_MTCMOS_CTRL, &(res), op)
+
+#endif /* !_UFS_MEDIATEK_SIP_H */
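The ufs_mtk_smc() wrapper above relies on a C99 compound literal: the variadic arguments initialize struct ufs_mtk_smc_arg positionally, and every member not supplied (v1..v7) is zero-initialized. An illustrative call through one of the helpers, with its assumed expansion:

	struct arm_smccc_res res;

	ufs_mtk_get_vcc_num(res);
	/*
	 * expands to roughly:
	 *   _ufs_mtk_smc((struct ufs_mtk_smc_arg) {
	 *           UFS_MTK_SIP_GET_VCC_NUM, &res });
	 * .cmd and .res are set, v1..v7 default to 0, and the SMC
	 * results come back in res.a0..a3.
	 */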
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index b8a8801322e2..c4f997196c57 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -19,13 +19,14 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
-#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
+
#include "ufs-mediatek.h"
+#include "ufs-mediatek-sip.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
@@ -51,6 +52,7 @@ static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
{},
};
+MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
/*
* Details of UIC Errors
@@ -118,6 +120,27 @@ static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}
+static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
+}
+
+static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
+}
+
+static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -169,16 +192,23 @@ static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct arm_smccc_res res;
reset_control_assert(host->hci_reset);
reset_control_assert(host->crypto_reset);
reset_control_assert(host->unipro_reset);
+ reset_control_assert(host->mphy_reset);
usleep_range(100, 110);
reset_control_deassert(host->unipro_reset);
reset_control_deassert(host->crypto_reset);
reset_control_deassert(host->hci_reset);
+ reset_control_deassert(host->mphy_reset);
+
+ /* restore mphy settings after mphy reset */
+ if (host->mphy_reset)
+ ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res);
}
static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
@@ -203,6 +233,8 @@ static void ufs_mtk_init_reset(struct ufs_hba *hba)
"unipro_rst");
ufs_mtk_init_reset_control(hba, &host->crypto_reset,
"crypto_rst");
+ ufs_mtk_init_reset_control(hba, &host->mphy_reset,
+ "mphy_rst");
}
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
@@ -622,6 +654,15 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
+ if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix"))
+ host->caps |= UFS_MTK_CAP_TX_SKEW_FIX;
+
+ if (of_property_read_bool(np, "mediatek,ufs-disable-mcq"))
+ host->caps |= UFS_MTK_CAP_DISABLE_MCQ;
+
+ if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
+ host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -885,6 +926,9 @@ static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
pdev = container_of(hba->dev, struct platform_device, dev);
+ if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
+ goto failed;
+
for (i = 0; i < host->mcq_nr_intr; i++) {
/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
irq = platform_get_irq(pdev, i + 1);
@@ -923,6 +967,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
struct ufs_mtk_host *host;
struct Scsi_Host *shost = hba->host;
int err = 0;
+ struct arm_smccc_res res;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
@@ -951,6 +996,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_init_reset(hba);
+ /* back up the mphy settings if the mphy can be reset */
+ if (host->mphy_reset)
+ ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res);
+
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
@@ -987,6 +1036,15 @@ static int ufs_mtk_init(struct ufs_hba *hba)
* Enable phy clocks specifically here.
*/
ufs_mtk_mphy_power_on(hba, true);
+
+ if (ufs_mtk_is_rtff_mtcmos(hba)) {
+ /* Restore first, to avoid backing up an unexpected value */
+ ufs_mtk_mtcmos_ctrl(false, res);
+
+ /* Power on to init */
+ ufs_mtk_mtcmos_ctrl(true, res);
+ }
+
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
@@ -1303,27 +1361,37 @@ static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
- if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
- return;
+ bool skip_vccqx = false;
- /* Skip if VCC is assumed always-on */
- if (!hba->vreg_info.vcc)
- return;
-
- /* Bypass LPM when device is still active */
+ /* Prevent entering LPM when device is still active */
if (lpm && ufshcd_is_ufs_dev_active(hba))
return;
- /* Bypass LPM if VCC is enabled */
- if (lpm && hba->vreg_info.vcc->enabled)
- return;
+ /* Skip vccqx LPM control; control vsx only */
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ skip_vccqx = true;
+
+ /* VCC is always-on, control vsx only */
+ if (!hba->vreg_info.vcc)
+ skip_vccqx = true;
+
+ /* A broken VCC keeps VCC always on; in most cases control vsx only */
+ if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) {
+ /* Some devices' vccqx/vsx can enter LPM */
+ if (ufs_mtk_is_allow_vccqx_lpm(hba))
+ skip_vccqx = false;
+ else /* control vsx only */
+ skip_vccqx = true;
+ }
if (lpm) {
- ufs_mtk_vccqx_set_lpm(hba, lpm);
+ if (!skip_vccqx)
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
ufs_mtk_vsx_set_lpm(hba, lpm);
} else {
ufs_mtk_vsx_set_lpm(hba, lpm);
- ufs_mtk_vccqx_set_lpm(hba, lpm);
+ if (!skip_vccqx)
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
}
}
@@ -1374,7 +1442,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (ufshcd_is_link_off(hba))
ufs_mtk_device_reset_ctrl(0, res);
- ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
+ ufs_mtk_sram_pwr_ctrl(false, res);
return 0;
fail:
@@ -1395,7 +1463,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
ufs_mtk_dev_vreg_set_lpm(hba, false);
- ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
+ ufs_mtk_sram_pwr_ctrl(true, res);
err = ufs_mtk_mphy_power_on(hba, true);
if (err)
@@ -1438,6 +1506,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
+ } else if (mid == UFS_VENDOR_MICRON) {
+ /* Only for hosts that have the TX skew issue */
+ if (ufs_mtk_is_tx_skew_fix(hba) &&
+ (STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) ||
+ STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) ||
+ STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) ||
+ STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) ||
+ STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) ||
+ STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8);
+ }
}
/*
@@ -1579,6 +1658,12 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ /* MCQ operation not permitted */
+ if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
+ return -EPERM;
+
return MAX_SUPP_MAC;
}
@@ -1790,6 +1875,7 @@ static void ufs_mtk_remove(struct platform_device *pdev)
static int ufs_mtk_system_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
int ret;
ret = ufshcd_system_suspend(dev);
@@ -1798,15 +1884,22 @@ static int ufs_mtk_system_suspend(struct device *dev)
ufs_mtk_dev_vreg_set_lpm(hba, true);
+ if (ufs_mtk_is_rtff_mtcmos(hba))
+ ufs_mtk_mtcmos_ctrl(false, res);
+
return 0;
}
static int ufs_mtk_system_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
ufs_mtk_dev_vreg_set_lpm(hba, false);
+ if (ufs_mtk_is_rtff_mtcmos(hba))
+ ufs_mtk_mtcmos_ctrl(true, res);
+
return ufshcd_system_resume(dev);
}
#endif
@@ -1815,6 +1908,7 @@ static int ufs_mtk_system_resume(struct device *dev)
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
int ret = 0;
ret = ufshcd_runtime_suspend(dev);
@@ -1823,12 +1917,19 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
ufs_mtk_dev_vreg_set_lpm(hba, true);
+ if (ufs_mtk_is_rtff_mtcmos(hba))
+ ufs_mtk_mtcmos_ctrl(false, res);
+
return 0;
}
static int ufs_mtk_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
+
+ if (ufs_mtk_is_rtff_mtcmos(hba))
+ ufs_mtk_mtcmos_ctrl(true, res);
ufs_mtk_dev_vreg_set_lpm(hba, false);
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index fb53882f42ca..3ff17e95afab 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -7,7 +7,6 @@
#define _UFS_MEDIATEK_H
#include <linux/bitops.h>
-#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
* MCQ define and struct
@@ -100,18 +99,6 @@ enum {
};
/*
- * SiP commands
- */
-#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
-#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0)
-#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
-#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
-#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
-#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5)
-#define UFS_MTK_SIP_GET_VCC_NUM BIT(6)
-#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7)
-
-/*
* VS_DEBUGCLOCKENABLE
*/
enum {
@@ -135,7 +122,17 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
+
+ /*
+ * Override UFS_MTK_CAP_BROKEN_VCC's behavior to allow the
+ * vccqx upstream regulator to enter LPM
+ */
+ UFS_MTK_CAP_ALLOW_VCCQX_LPM = 1 << 5,
UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6,
+ UFS_MTK_CAP_TX_SKEW_FIX = 1 << 7,
+ UFS_MTK_CAP_DISABLE_MCQ = 1 << 8,
+ /* Control MTCMOS with RTFF */
+ UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9,
};
struct ufs_mtk_crypt_cfg {
@@ -170,6 +167,7 @@ struct ufs_mtk_host {
struct reset_control *hci_reset;
struct reset_control *unipro_reset;
struct reset_control *crypto_reset;
+ struct reset_control *mphy_reset;
struct ufs_hba *hba;
struct ufs_mtk_crypt_cfg *crypt;
struct ufs_mtk_clk mclk;
@@ -191,70 +189,4 @@ struct ufs_mtk_host {
/* MTK delay of autosuspend: 500 ms */
#define MTK_RPM_AUTOSUSPEND_DELAY_MS 500
-/*
- * Multi-VCC by Numbering
- */
-enum ufs_mtk_vcc_num {
- UFS_VCC_NONE = 0,
- UFS_VCC_1,
- UFS_VCC_2,
- UFS_VCC_MAX
-};
-
-/*
- * Host Power Control options
- */
-enum {
- HOST_PWR_HCI = 0,
- HOST_PWR_MPHY
-};
-
-/*
- * SMC call wrapper function
- */
-struct ufs_mtk_smc_arg {
- unsigned long cmd;
- struct arm_smccc_res *res;
- unsigned long v1;
- unsigned long v2;
- unsigned long v3;
- unsigned long v4;
- unsigned long v5;
- unsigned long v6;
- unsigned long v7;
-};
-
-static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s)
-{
- arm_smccc_smc(MTK_SIP_UFS_CONTROL,
- s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res);
-}
-
-#define ufs_mtk_smc(...) \
- _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__})
-
-/*
- * SMC call interface
- */
-#define ufs_mtk_va09_pwr_ctrl(res, on) \
- ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on)
-
-#define ufs_mtk_crypto_ctrl(res, enable) \
- ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable)
-
-#define ufs_mtk_ref_clk_notify(on, stage, res) \
- ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage)
-
-#define ufs_mtk_device_reset_ctrl(high, res) \
- ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high)
-
-#define ufs_mtk_host_pwr_ctrl(opt, on, res) \
- ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on)
-
-#define ufs_mtk_get_vcc_num(res) \
- ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res))
-
-#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \
- ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver)
-
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 7a00004bfd03..cca190d1c577 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -284,9 +284,6 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
if (host->hw_ver.major >= 0x05)
ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
-
- /* make sure above configuration is applied before we return */
- mb();
}
/*
@@ -415,7 +412,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
- mb();
+ ufshcd_readl(hba, REG_UFS_CFG2);
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -507,7 +504,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
* make sure above write gets applied before we return from
* this function.
*/
- mb();
+ ufshcd_readl(hba, REG_UFS_SYS1CLK_1US);
}
return 0;
@@ -537,8 +534,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
* and device TX LCC are disabled once link startup is
* completed.
*/
- if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
- err = ufshcd_disable_host_tx_lcc(hba);
+ err = ufshcd_disable_host_tx_lcc(hba);
break;
default:
@@ -696,6 +692,16 @@ static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *ho
int gear = max_t(u32, p->gear_rx, p->gear_tx);
int lane = max_t(u32, p->lane_rx, p->lane_tx);
+ if (WARN_ONCE(gear > QCOM_UFS_MAX_GEAR,
+ "ICC scaling for UFS Gear (%d) not supported. Using Gear (%d) bandwidth\n",
+ gear, QCOM_UFS_MAX_GEAR))
+ gear = QCOM_UFS_MAX_GEAR;
+
+ if (WARN_ONCE(lane > QCOM_UFS_MAX_LANE,
+ "ICC scaling for UFS Lane (%d) not supported. Using Lane (%d) bandwidth\n",
+ lane, QCOM_UFS_MAX_LANE))
+ lane = QCOM_UFS_MAX_LANE;
+
if (ufshcd_is_hs_mode(p)) {
if (p->hs_rate == PA_HS_MODE_B)
return ufs_qcom_bw_table[MODE_HS_RB][gear][lane];
@@ -1451,11 +1457,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
(u32)host->testbus.select_minor << offset,
reg);
ufs_qcom_enable_test_bus(host);
- /*
- * Make sure the test bus configuration is
- * committed before returning.
- */
- mb();
return 0;
}
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 9dd9a391ebb7..b9de170983c9 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -151,10 +151,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
/*
- * Make sure assertion of ufs phy reset is written to
- * register before returning
+ * Dummy read to ensure the write takes effect before doing any sort
+ * of delay
*/
- mb();
+ ufshcd_readl(hba, REG_UFS_CFG1);
}
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
@@ -162,10 +162,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1);
/*
- * Make sure de-assertion of ufs phy reset is written to
- * register before returning
+ * Dummy read to ensure the write takes effect before doing any sort
+ * of delay
*/
- mb();
+ ufshcd_readl(hba, REG_UFS_CFG1);
}
/* Host controller hardware version: major.minor.step */
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 8c8fa71c69c4..9f758241d9d3 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -328,12 +328,6 @@ static int mts_slave_alloc (struct scsi_device *s)
return 0;
}
-static int mts_slave_configure (struct scsi_device *s)
-{
- blk_queue_dma_alignment(s->request_queue, (512 - 1));
- return 0;
-}
-
static int mts_scsi_abort(struct scsi_cmnd *srb)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
@@ -631,8 +625,8 @@ static const struct scsi_host_template mts_scsi_host_template = {
.can_queue = 1,
.this_id = -1,
.emulated = 1,
+ .dma_alignment = 511,
.slave_alloc = mts_slave_alloc,
- .slave_configure = mts_slave_configure,
.max_sectors= 256, /* 128 K */
};
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 12cf9940e5b6..b31464740f6c 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -40,7 +40,6 @@
#include <scsi/scsi_eh.h>
#include "usb.h"
-#include <linux/usb/hcd.h>
#include "scsiglue.h"
#include "debug.h"
#include "transport.h"
@@ -76,12 +75,6 @@ static int slave_alloc (struct scsi_device *sdev)
*/
sdev->inquiry_len = 36;
- /*
- * Some host controllers may have alignment requirements.
- * We'll play it safe by requiring 512-byte alignment always.
- */
- blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
/* Tell the SCSI layer if we know there is more than one LUN */
if (us->protocol == USB_PR_BULK && us->max_lun > 0)
sdev->sdev_bflags |= BLIST_FORCELUN;
@@ -89,7 +82,7 @@ static int slave_alloc (struct scsi_device *sdev)
return 0;
}
-static int slave_configure(struct scsi_device *sdev)
+static int device_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct us_data *us = host_to_us(sdev->host);
struct device *dev = us->pusb_dev->bus->sysdev;
@@ -104,40 +97,28 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_SIZE >> 9;
- if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
- blk_queue_max_hw_sectors(sdev->request_queue,
- max_sectors);
+ lim->max_hw_sectors = min(lim->max_hw_sectors, max_sectors);
} else if (sdev->type == TYPE_TAPE) {
/*
* Tapes need much higher max_sector limits, so just
* raise it to the maximum possible (4 GB / 512) and
* let the queue segment size sort out the real limit.
*/
- blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
+ lim->max_hw_sectors = 0x7FFFFF;
} else if (us->pusb_dev->speed >= USB_SPEED_SUPER) {
/*
* USB3 devices will be limited to 2048 sectors. This gives us
* better throughput on most devices.
*/
- blk_queue_max_hw_sectors(sdev->request_queue, 2048);
+ lim->max_hw_sectors = 2048;
}
/*
* The max_hw_sectors should be up to maximum size of a mapping for
* the device. Otherwise, a DMA API might fail on swiotlb environment.
*/
- blk_queue_max_hw_sectors(sdev->request_queue,
- min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
- dma_max_mapping_size(dev) >> SECTOR_SHIFT));
-
- /*
- * Some USB host controllers can't do DMA; they have to use PIO.
- * For such controllers we need to make sure the block layer sets
- * up bounce buffers in addressable memory.
- */
- if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
- (bus_to_hcd(us->pusb_dev->bus)->localmem_pool != NULL))
- blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
+ lim->max_hw_sectors = min_t(size_t,
+ lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT);
/*
* We can't put these settings in slave_alloc() because that gets
@@ -598,13 +579,22 @@ static ssize_t max_sectors_store(struct device *dev, struct device_attribute *at
size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
+ struct queue_limits lim;
unsigned short ms;
+ int ret;
- if (sscanf(buf, "%hu", &ms) > 0) {
- blk_queue_max_hw_sectors(sdev->request_queue, ms);
- return count;
- }
- return -EINVAL;
+ if (sscanf(buf, "%hu", &ms) <= 0)
+ return -EINVAL;
+
+ blk_mq_freeze_queue(sdev->request_queue);
+ lim = queue_limits_start_update(sdev->request_queue);
+ lim.max_hw_sectors = ms;
+ ret = queue_limits_commit_update(sdev->request_queue, &lim);
+ blk_mq_unfreeze_queue(sdev->request_queue);
+
+ if (ret)
+ return ret;
+ return count;
}
static DEVICE_ATTR_RW(max_sectors);
@@ -642,12 +632,17 @@ static const struct scsi_host_template usb_stor_host_template = {
.this_id = -1,
.slave_alloc = slave_alloc,
- .slave_configure = slave_configure,
+ .device_configure = device_configure,
.target_alloc = target_alloc,
/* lots of sg segments can be handled */
.sg_tablesize = SG_MAX_SEGMENTS,
+ /*
+ * Some host controllers may have alignment requirements.
+ * We'll play it safe by requiring 512-byte alignment always.
+ */
+ .dma_alignment = 511,
/*
* Limit the total size of a transfer to 120 KB.
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 08953f0d4532..a48870a87a29 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -821,26 +821,19 @@ static int uas_slave_alloc(struct scsi_device *sdev)
(struct uas_dev_info *)sdev->host->hostdata;
sdev->hostdata = devinfo;
-
- /*
- * The protocol has no requirements on alignment in the strict sense.
- * Controllers may or may not have alignment restrictions.
- * As this is not exported, we use an extremely conservative guess.
- */
- blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
- if (devinfo->flags & US_FL_MAX_SECTORS_64)
- blk_queue_max_hw_sectors(sdev->request_queue, 64);
- else if (devinfo->flags & US_FL_MAX_SECTORS_240)
- blk_queue_max_hw_sectors(sdev->request_queue, 240);
-
return 0;
}
-static int uas_slave_configure(struct scsi_device *sdev)
+static int uas_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct uas_dev_info *devinfo = sdev->hostdata;
+ if (devinfo->flags & US_FL_MAX_SECTORS_64)
+ lim->max_hw_sectors = 64;
+ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
+ lim->max_hw_sectors = 240;
+
if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
sdev->no_report_opcodes = 1;
@@ -905,11 +898,17 @@ static const struct scsi_host_template uas_host_template = {
.queuecommand = uas_queuecommand,
.target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
- .slave_configure = uas_slave_configure,
+ .device_configure = uas_device_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
.skip_settle_delay = 1,
+ /*
+ * The protocol has no requirements on alignment in the strict sense.
+ * Controllers may or may not have alignment restrictions.
+ * As this is not exported, we use an extremely conservative guess.
+ */
+ .dma_alignment = 511,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = sizeof(struct uas_cmd_info),
};
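The uas conversion above shows the general shape of the new hook: fixed limits such as the conservative 511-byte dma_alignment move into the scsi_host_template, while per-device limits are written into the queue_limits the midlayer passes in and commits after the callback returns. A hedged sketch with a hypothetical predicate:

	static int example_device_configure(struct scsi_device *sdev,
					    struct queue_limits *lim)
	{
		/* per-device limit; the midlayer applies it on return */
		if (device_needs_small_io(sdev))	/* hypothetical */
			lim->max_hw_sectors = 64;
		return 0;
	}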
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index d1ad6a2509ab..a49a31639f6f 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -47,6 +47,7 @@
#include <scsi/scsi_device.h>
#include "usb.h"
+#include <linux/usb/hcd.h>
#include "scsiglue.h"
#include "transport.h"
#include "protocol.h"
@@ -961,6 +962,15 @@ int usb_stor_probe1(struct us_data **pus,
if (result)
goto BadDevice;
+ /*
+ * Some USB host controllers can't do DMA; they have to use PIO.
+ * For such controllers we need to make sure the block layer sets
+ * up bounce buffers in addressable memory.
+ */
+ if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
+ bus_to_hcd(us->pusb_dev->bus)->localmem_pool)
+ host->no_highmem = true;
+
/* Get the unusual_devs entries and the descriptors */
result = get_device_info(us, id, unusual_dev);
if (result)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 69c4f113db42..fd5951dd6b7d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -897,18 +897,25 @@ int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+/**
+ * queue_limits_cancel_update - cancel an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This functions cancels an atomic update of the queue limits started by
+ * queue_limits_start_update() and should be used when an error occurs after
+ * starting update.
+ */
+static inline void queue_limits_cancel_update(struct request_queue *q)
+{
+ mutex_unlock(&q->limits_lock);
+}
+
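A hedged sketch of the error path queue_limits_cancel_update() exists for, with a hypothetical validation step:

	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_hw_sectors = new_max;
	if (new_max > hw_limit) {	/* hypothetical check */
		/* drops q->limits_lock and discards the staged limits */
		queue_limits_cancel_update(q);
		return -EINVAL;
	}
	return queue_limits_commit_update(q, &lim);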
/*
* Access functions for manipulating queue properties
*/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
-extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_discard_segments(struct request_queue *,
- unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
unsigned int max_sectors);
-extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
@@ -925,7 +932,6 @@ void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
-extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -933,10 +939,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_dma_alignment(struct request_queue *, int);
-extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
@@ -945,9 +947,6 @@ disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
struct blk_independent_access_ranges *iars);
-extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev);
-
bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 9e97ced2896d..14fa93268630 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -65,7 +65,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
+ struct queue_limits *lim, bsg_job_fn *job_fn,
+ bsg_timeout_fn *timeout, int dd_job_size);
void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
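bsg_setup_queue() now takes a queue_limits pointer so a transport can hand its DMA constraints to the block layer when the queue is allocated rather than adjusting them afterwards. A hedged caller sketch; the callback names and values are illustrative:

	struct queue_limits lim = {
		.max_hw_sectors	= 1024,
		.max_segments	= 64,
	};
	struct request_queue *q;

	q = bsg_setup_queue(dev, "example_bsg", &lim,
			    example_bsg_dispatch, example_bsg_timeout, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);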
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 324d792e7c78..13fb41d25da6 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1152,12 +1152,19 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
-extern int ata_scsi_slave_config(struct scsi_device *sdev);
+int ata_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
int queue_depth);
+extern int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported);
+extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled);
+extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1244,7 +1251,8 @@ extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -1410,13 +1418,13 @@ extern const struct attribute_group *ata_common_sdev_groups[];
__ATA_BASE_SHT(drv_name), \
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
__ATA_BASE_SHT(drv_name), \
.can_queue = drv_qd, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 5894bf912f7b..88c6a76042ee 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -433,8 +433,8 @@ struct mmc_host {
mmc_pm_flag_t pm_caps; /* supported pm features */
/* host specific block data */
- unsigned int max_seg_size; /* see blk_queue_max_segment_size */
- unsigned short max_segs; /* see blk_queue_max_segments */
+ unsigned int max_seg_size; /* lim->max_segment_size */
+ unsigned short max_segs; /* lim->max_segments */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
diff --git a/include/scsi/iser.h b/include/scsi/iser.h
index 2e678fa74eca..07a83bfa2b8e 100644
--- a/include/scsi/iser.h
+++ b/include/scsi/iser.h
@@ -63,7 +63,7 @@ struct iser_cm_hdr {
* @rsvd: reserved
* @write_stag: write rkey
* @write_va: write virtual address
- * @reaf_stag: read rkey
+ * @read_stag: read rkey
* @read_va: read virtual address
*/
struct iser_ctrl {
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index eca6fd42d7f7..4a9b4169e081 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -44,11 +44,16 @@
* @LPORT_ST_DISABLED: Disabled
* @LPORT_ST_FLOGI: Fabric login (FLOGI) sent
* @LPORT_ST_DNS: Waiting for name server remote port to become ready
- * @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent
+ * @LPORT_ST_RNN_ID: Register port name by ID (RNN_ID) sent
+ * @LPORT_ST_RSNN_NN: Waiting for host symbolic node name
+ * @LPORT_ST_RSPN_ID: Waiting for host symbolic port name
* @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent
* @LPORT_ST_RFF_ID: Register FC-4 Features by ID (RFF_ID) sent
* @LPORT_ST_FDMI: Waiting for mgmt server rport to become ready
- * @LPORT_ST_RHBA:
+ * @LPORT_ST_RHBA: Register HBA
+ * @LPORT_ST_RPA: Register Port Attributes
+ * @LPORT_ST_DHBA: Deregister HBA
+ * @LPORT_ST_DPRT: Deregister Port
* @LPORT_ST_SCR: State Change Register (SCR) sent
* @LPORT_ST_READY: Ready for use
* @LPORT_ST_LOGO: Local port logout (LOGO) sent
@@ -183,7 +188,7 @@ struct fc_rport_libfc_priv {
* @r_a_tov: Resource allocation timeout value (in msec)
* @rp_mutex: The mutex that protects the remote port
* @retry_work: Handle for retries
- * @event_callback: Callback when READY, FAILED or LOGO states complete
+ * @lld_event_callback: Callback when READY, FAILED or LOGO states complete
* @prli_count: Count of open PRLI sessions in providers
* @rcu: Structure used for freeing in an RCU-safe manner
*/
@@ -289,6 +294,7 @@ struct fc_seq_els_data {
* @timer: The command timer
* @tm_done: Completion indicator
* @wait_for_comp: Indicator to wait for completion of the I/O (in jiffies)
+ * @timer_delay: FCP packet timer delay in jiffies
* @data_len: The length of the data
* @cdb_cmd: The CDB command
* @xfer_len: The transfer length
@@ -788,6 +794,8 @@ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *);
/**
* fc_lport_test_ready() - Determine if a local port is in the READY state
* @lport: The local port to test
+ *
+ * Returns: %true if local port is in the READY state, %false otherwise
*/
static inline int fc_lport_test_ready(struct fc_lport *lport)
{
@@ -830,6 +838,8 @@ static inline void fc_lport_state_enter(struct fc_lport *lport,
/**
* fc_lport_init_stats() - Allocate per-CPU statistics for a local port
* @lport: The local port whose statistics are to be initialized
+ *
+ * Returns: %0 on success, %-ENOMEM on failure
*/
static inline int fc_lport_init_stats(struct fc_lport *lport)
{
@@ -851,6 +861,8 @@ static inline void fc_lport_free_stats(struct fc_lport *lport)
/**
* lport_priv() - Return the private data from a local port
* @lport: The local port whose private data is to be retrieved
+ *
+ * Returns: the local port's private data pointer
*/
static inline void *lport_priv(const struct fc_lport *lport)
{
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 8300ef1a982e..3c5899290aed 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -157,7 +157,9 @@ struct fcoe_ctlr {
/**
* fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr
- * @cltr: The fcoe_ctlr whose private data will be returned
+ * @ctlr: The fcoe_ctlr whose private data will be returned
+ *
+ * Returns: pointer to the private data
*/
static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
{
@@ -174,7 +176,6 @@ static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
* struct fcoe_fcf - Fibre-Channel Forwarder
* @list: list linkage
* @event_work: Work for FC Transport actions queue
- * @event: The event to be processed
* @fip: The controller that the FCF was discovered on
* @fcf_dev: The associated fcoe_fcf_device instance
* @time: system time (jiffies) when an advertisement was last received
@@ -188,6 +189,7 @@ static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
* @flogi_sent: current FLOGI sent to this FCF
* @flags: flags received from advertisement
* @fka_period: keep-alive period, in jiffies
+ * @fd_flags: no need for FKA from ENode
*
* A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that
* passes FCoE frames on to an FC fabric. This structure represents
@@ -222,6 +224,7 @@ struct fcoe_fcf {
/**
* struct fcoe_rport - VN2VN remote port
+ * @rdata: libfc remote port private data
* @time: time of create or last beacon packet received from node
* @fcoe_len: max FCoE frame size, not including VLAN or Ethernet headers
* @flags: flags from probe or claim
@@ -266,8 +269,10 @@ void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
/**
- * is_fip_mode() - returns true if FIP mode selected.
+ * is_fip_mode() - test if FIP mode selected.
* @fip: FCoE controller.
+ *
+ * Returns: %true if FIP mode is selected
*/
static inline bool is_fip_mode(struct fcoe_ctlr *fip)
{
@@ -318,9 +323,10 @@ struct fcoe_transport {
* @kthread: The thread context (used by bnx2fc)
* @work: The work item (used by fcoe)
* @fcoe_rx_list: The queue of pending packets to process
- * @page: The memory page for calculating frame trailer CRCs
+ * @crc_eof_page: The memory page for calculating frame trailer CRCs
* @crc_eof_offset: The offset into the CRC page pointing to available
* memory for a new trailer
+ * @lock: local lock for members of this struct
*/
struct fcoe_percpu_s {
struct task_struct *kthread;
@@ -343,7 +349,8 @@ struct fcoe_percpu_s {
* @timer: The queue timer
* @destroy_work: Handle for work context
* (to prevent RTNL deadlocks)
- * @data_srt_addr: Source address for data
+ * @data_src_addr: Source address for data
+ * @get_netdev: function that returns a &net_device from @lport
*
* An instance of this structure is to be allocated along with the
* Scsi_Host and libfc fc_lport structures.
@@ -364,6 +371,8 @@ struct fcoe_port {
/**
* fcoe_get_netdev() - Return the net device associated with a local port
* @lport: The local port to get the net device from
+ *
+ * Returns: the &net_device associated with this @lport
*/
static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport)
{
@@ -383,8 +392,10 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *);
/**
- * struct netdev_list
- * A mapping from netdevice to fcoe_transport
+ * struct fcoe_netdev_mapping - A mapping from &net_device to &fcoe_transport
+ * @list: list linkage of the mappings
+ * @netdev: the &net_device
+ * @ft: the fcoe_transport associated with @netdev
*/
struct fcoe_netdev_mapping {
struct list_head list;
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index f5257103fdb6..1324068dd950 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -683,7 +683,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset);
int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
-extern int sas_slave_configure(struct scsi_device *);
+int sas_device_configure(struct scsi_device *dev,
+ struct queue_limits *lim);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_bios_param(struct scsi_device *, struct block_device *,
sector_t capacity, int *hsc);
@@ -726,4 +727,33 @@ void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
gfp_t gfp_flags);
+#define __LIBSAS_SHT_BASE \
+ .module = THIS_MODULE, \
+ .name = DRV_NAME, \
+ .proc_name = DRV_NAME, \
+ .queuecommand = sas_queuecommand, \
+ .dma_need_drain = ata_scsi_dma_need_drain, \
+ .target_alloc = sas_target_alloc, \
+ .change_queue_depth = sas_change_queue_depth, \
+ .bios_param = sas_bios_param, \
+ .this_id = -1, \
+ .eh_device_reset_handler = sas_eh_device_reset_handler, \
+ .eh_target_reset_handler = sas_eh_target_reset_handler, \
+ .target_destroy = sas_target_destroy, \
+ .ioctl = sas_ioctl, \
+
+#ifdef CONFIG_COMPAT
+#define _LIBSAS_SHT_BASE __LIBSAS_SHT_BASE \
+ .compat_ioctl = sas_ioctl,
+#else
+#define _LIBSAS_SHT_BASE __LIBSAS_SHT_BASE
+#endif
+
+#define LIBSAS_SHT_BASE _LIBSAS_SHT_BASE \
+ .device_configure = sas_device_configure, \
+ .slave_alloc = sas_slave_alloc, \
+
+#define LIBSAS_SHT_BASE_NO_SLAVE_INIT _LIBSAS_SHT_BASE
+
+
#endif /* _SASLIB_H_ */
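LIBSAS_SHT_BASE collects the host-template boilerplate shared by every libsas LLDD; a driver defines DRV_NAME and then only adds its own limits. A minimal sketch with an illustrative name and values:

	#define DRV_NAME "example_sas"

	static const struct scsi_host_template example_sas_sht = {
		LIBSAS_SHT_BASE
		.can_queue	= 256,
		.sg_tablesize	= SG_ALL,
	};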
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 2f8c719840a6..92e27e7bf088 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -39,6 +39,9 @@ int smp_ata_check_ready_type(struct ata_link *link);
int sas_discover_sata(struct domain_device *dev);
int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
struct domain_device *child, int phy_id);
+
+extern const struct attribute_group sas_ata_sdev_attr_group;
+
#else
static inline void sas_ata_disabled_notice(void)
@@ -123,6 +126,9 @@ static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *p
sas_ata_disabled_notice();
return -ENODEV;
}
+
+#define sas_ata_sdev_attr_group ((struct attribute_group) {})
+
#endif
#endif /* _SAS_ATA_H_ */
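[editor's note] sas_ata_sdev_attr_group lets a libsas driver hang the ATA-specific sysfs attributes off its devices via sdev_groups; when SAS ATA support is compiled out the symbol degenerates to an empty compound-literal group so the reference still builds. A hedged sketch of the wiring (array name hypothetical):

    static const struct attribute_group *mysas_sdev_groups[] = {
	    &sas_ata_sdev_attr_group,	/* empty stub if SAS ATA is disabled */
	    NULL
    };

    /* in the scsi_host_template: .sdev_groups = mysas_sdev_groups, */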
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 4498f845b112..96b350366670 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -7,8 +7,9 @@
#define _SCSI_SCSI_H
#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <linux/kernel.h>
+
+#include <asm/param.h>
+
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_status.h>
@@ -69,7 +70,7 @@ static inline int scsi_is_wlun(u64 lun)
* @status: the status passed up from the driver (including host and
* driver components)
*
- * This returns true if the status code is SAM_STAT_CHECK_CONDITION.
+ * Returns: %true if the status code is SAM_STAT_CHECK_CONDITION.
*/
static inline int scsi_status_is_check_condition(int status)
{
@@ -189,12 +190,13 @@ enum scsi_disposition {
/* Used to obtain the PCI location of a device */
#define SCSI_IOCTL_GET_PCI 0x5387
-/** scsi_status_is_good - check the status return.
+/**
+ * scsi_status_is_good - check the status return.
*
* @status: the status passed up from the driver (including host and
* driver components)
*
- * This returns true for known good conditions that may be treated as
+ * Returns: %true for known good conditions that may be treated as
* command completed normally
*/
static inline bool scsi_status_is_good(int status)
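[editor's note] Both helpers exist so that status handling reads declaratively at call sites instead of open-coding SAM status masks. A hypothetical completion-path sketch (mydrv_log_sense is an assumed driver helper, not a kernel API):

    static void mydrv_complete(struct scsi_cmnd *cmd)
    {
	    if (scsi_status_is_good(cmd->result))
		    goto done;		/* treat as a normal completion */
	    if (scsi_status_is_check_condition(cmd->result))
		    mydrv_log_sense(cmd);	/* hypothetical: sense data valid */
    done:
	    scsi_done(cmd);
    }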
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 526def14e7fb..45c40d200154 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -353,6 +353,8 @@ static inline u8 get_host_byte(struct scsi_cmnd *cmd)
/**
* scsi_msg_to_host_byte() - translate message byte
+ * @cmd: the SCSI command
+ * @msg: the SCSI parallel message byte to translate
*
* Translate the SCSI parallel message byte to a matching
* host byte setting. A message of COMMAND_COMPLETE indicates
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index f40915d2ecee..c0e89996bdb3 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -23,7 +23,9 @@ struct scsi_driver {
#define to_scsi_driver(drv) \
container_of((drv), struct scsi_driver, gendrv)
-extern int scsi_register_driver(struct device_driver *);
+#define scsi_register_driver(drv) \
+ __scsi_register_driver(drv, THIS_MODULE)
+int __scsi_register_driver(struct device_driver *, struct module *);
#define scsi_unregister_driver(drv) \
driver_unregister(drv);
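[editor's note] The registration interface now records the owning module: scsi_register_driver() becomes a macro that captures the caller's THIS_MODULE and forwards it to __scsi_register_driver(), pinning the module reference on the upper-level driver rather than on scsi_mod. Call sites are unchanged; the usual ULD pattern, sketched with hypothetical names:

    static struct scsi_driver myuld_template = {
	    .gendrv = {
		    .name   = "myuld",
		    .probe  = myuld_probe,	/* hypothetical callbacks */
		    .remove = myuld_remove,
	    },
    };

    static int __init myuld_init(void)
    {
	    /* THIS_MODULE is supplied implicitly by the macro */
	    return scsi_register_driver(&myuld_template.gendrv);
    }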
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 129001f600fc..19a1c5c48935 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -211,7 +211,11 @@ struct scsi_host_template {
* up after yourself before returning non-0
*
* Status: OPTIONAL
+ *
+ * Note: slave_configure is the legacy version, use device_configure for
+ * all new code. A driver must never define both.
*/
+ int (* device_configure)(struct scsi_device *, struct queue_limits *lim);
int (* slave_configure)(struct scsi_device *);
/*
@@ -405,6 +409,8 @@ struct scsi_host_template {
*/
unsigned int max_segment_size;
+ unsigned int dma_alignment;
+
/*
* DMA scatter gather segment boundary limit. A segment crossing this
* boundary will be split in two.
@@ -614,6 +620,7 @@ struct Scsi_Host {
unsigned int max_sectors;
unsigned int opt_sectors;
unsigned int max_segment_size;
+ unsigned int dma_alignment;
unsigned long dma_boundary;
unsigned long virt_boundary_mask;
/*
@@ -665,6 +672,8 @@ struct Scsi_Host {
/* The transport requires the LUN bits NOT to be stored in CDB[1] */
unsigned no_scsi2_lun_in_cdb:1;
+ unsigned no_highmem:1;
+
/*
* Optional work queue to be utilized by the transport
*/
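[editor's note] This is the core of the queue-limits rework as seen from this header: device_configure receives the queue_limits that will be committed when the device's request queue is set up, so a driver mutates the passed-in structure instead of calling blk_queue_*() helpers after the fact; dma_alignment and no_highmem move into the host structures for the same reason. A minimal sketch, assuming a hypothetical driver that caps transfers and needs 4-byte buffer alignment:

    static int mydrv_device_configure(struct scsi_device *sdev,
				      struct queue_limits *lim)
    {
	    lim->max_hw_sectors = 1024;	/* cap transfers at 512 KiB */
	    lim->dma_alignment = 0x3;	/* mask: buffers 4-byte aligned */
	    return 0;
    }

    static const struct scsi_host_template mydrv_sht = {
	    /* ... */
	    .device_configure = mydrv_device_configure,
	    /* .slave_configure must not also be set */
    };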
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index a0458bda3148..1394cf313bb3 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -83,6 +83,6 @@ scsi_transport_device_data(struct scsi_device *sdev)
+ shost->transportt->device_private_offset;
}
-void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q);
+void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim);
#endif /* SCSI_TRANSPORT_H */
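[editor's note] __scsi_init_queue(), which patched an already-allocated request_queue, is replaced by scsi_init_limits(), which seeds a queue_limits structure for the caller to pass to queue allocation. A hedged sketch of the intended call pattern in transport code (function name and the 16-segment bound are hypothetical; the downstream allocator call is elided):

    static int mytr_setup_bsg(struct Scsi_Host *shost)
    {
	    struct queue_limits lim;

	    scsi_init_limits(shost, &lim);	/* start from host limits */
	    if (lim.max_segments > 16)
		    lim.max_segments = 16;	/* arbitrary example bound */
	    /* hand &lim to the queue/bsg allocation path rather than
	     * adjusting the request_queue after it exists */
	    return 0;
    }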
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 483513c57597..4b884b8013e0 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -709,6 +709,7 @@ struct fc_function_template {
int (*vport_delete)(struct fc_vport *);
/* bsg support */
+ u32 max_bsg_segments;
int (*bsg_request)(struct bsg_job *);
int (*bsg_timeout)(struct bsg_job *);
@@ -770,10 +771,9 @@ struct fc_function_template {
/**
* fc_remote_port_chkready - called to validate the remote port state
* prior to initiating io to the port.
- *
- * Returns a scsi result code that can be returned by the LLDD.
- *
* @rport: remote port to be checked
+ *
+ * Returns: a scsi result code that can be returned by the LLDD.
**/
static inline int
fc_remote_port_chkready(struct fc_rport *rport)
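[editor's note] fc_remote_port_chkready() is intended for the top of an FC LLDD's queuecommand(), so commands fail fast with a transport-appropriate result while the rport is blocked or gone; the new max_bsg_segments field above similarly lets a driver bound bsg requests up front. Typical usage, sketched with hypothetical driver names:

    static int myfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
	    struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	    int ret = fc_remote_port_chkready(rport);

	    if (ret) {
		    cmd->result = ret;	/* scsi result from the helper */
		    scsi_done(cmd);
		    return 0;
	    }
	    /* ... build and issue the command to the hardware ... */
	    return 0;
    }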
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index dfc78aa112ad..5b70b538447e 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -74,7 +74,7 @@ struct srp_rport {
};
/**
- * struct srp_function_template
+ * struct srp_function_template - template for SRP initiator drivers
*
* Fields that are only relevant for SRP initiator drivers:
* @has_rport_state: Whether or not to create the state, fast_io_fail_tmo and
@@ -124,7 +124,7 @@ enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd);
* srp_chkready() - evaluate the transport layer state before I/O
* @rport: SRP target port pointer.
*
- * Returns a SCSI result code that can be returned by the LLD queuecommand()
+ * Returns: a SCSI result code that can be returned by the LLD queuecommand()
* implementation. The role of this function is similar to that of
* fc_remote_port_chkready().
*/
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
index 30a5c1a59376..a3ba779a3f78 100644
--- a/include/uapi/scsi/scsi_bsg_mpi3mr.h
+++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
@@ -401,7 +401,7 @@ struct mpi3mr_buf_entry {
__u32 buf_len;
};
/**
- * struct mpi3mr_bsg_buf_entry_list - list of user buffer
+ * struct mpi3mr_buf_entry_list - list of user buffer
* descriptor for MPI Passthrough requests.
*
* @num_of_entries: Number of buffer descriptors
@@ -424,6 +424,7 @@ struct mpi3mr_buf_entry_list {
* @mrioc_id: Controller ID
* @rsvd1: Reserved
* @timeout: MPI request timeout
+ * @rsvd2: Reserved
* @buf_entry_list: Buffer descriptor list
*/
struct mpi3mr_bsg_mptcmd {
@@ -441,8 +442,9 @@ struct mpi3mr_bsg_mptcmd {
* @cmd_type: represents drvrcmd or mptcmd
* @rsvd1: Reserved
* @rsvd2: Reserved
- * @drvrcmd: driver request structure
- * @mptcmd: mpt request structure
+ * @rsvd3: Reserved
+ * @cmd.drvrcmd: driver request structure
+ * @cmd.mptcmd: mpt request structure
*/
struct mpi3mr_bsg_packet {
__u8 cmd_type;
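[editor's note] The @cmd.drvrcmd/@cmd.mptcmd lines above show the kernel-doc form for members reached through a named nested union: each leaf gets its own @outer.inner tag. A minimal hypothetical illustration:

    /**
     * struct example_packet - hypothetical, shows nested-member tags
     * @type: selects which union member is valid
     * @u: request payload
     * @u.a: used when @type selects variant A
     * @u.b: used when @type selects variant B
     */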
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 03f2beadf201..8c29e498ef98 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -123,6 +123,7 @@ struct utp_upiu_query {
* @idn: a value that indicates the particular type of data B-1
* @index: Index to further identify data B-2
* @selector: Index to further identify data B-3
+ * @osf3: spec field B-4
* @osf4: spec field B-5
* @osf5: spec field B 6,7
* @osf6: spec field DW 8,9
@@ -138,12 +139,13 @@ struct utp_upiu_query_v4_0 {
__be16 osf5;
__be32 osf6;
__be32 osf7;
+ /* private: */
__be32 reserved;
};
/**
* struct utp_upiu_cmd - Command UPIU structure
- * @data_transfer_len: Data Transfer Length DW-3
+ * @exp_data_transfer_len: Data Transfer Length DW-3
* @cdb: Command Descriptor Block CDB DW-4 to DW-7
*/
struct utp_upiu_cmd {
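[editor's note] The "/* private: */" marker added before the reserved field tells kernel-doc to stop expecting @-tags for what follows, which is why @reserved needs no entry while @osf3 did. A hypothetical sketch of the marker:

    /**
     * struct example_upiu - hypothetical, shows the private: marker
     * @len: public field, must be documented
     */
    struct example_upiu {
	    __be32 len;
	    /* private: */
	    __be32 reserved;	/* omitted from generated documentation */
    };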
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index a35e12f8e68b..bad88bd91995 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -374,7 +374,6 @@ struct ufs_hba_variant_ops {
int (*get_outstanding_cqs)(struct ufs_hba *hba,
unsigned long *ocqs);
int (*config_esi)(struct ufs_hba *hba);
- void (*config_scsi_dev)(struct scsi_device *sdev);
};
/* clock gating state */
@@ -1390,8 +1389,6 @@ void ufshcd_release(struct ufs_hba *hba);
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
-
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index a196e1c4c3bb..385e1c6b8d60 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -355,12 +355,8 @@ enum {
/* Interrupt disable masks */
enum {
- /* Interrupt disable mask for UFSHCI v1.0 */
- INTERRUPT_MASK_ALL_VER_10 = 0x30FFF,
- INTERRUPT_MASK_RW_VER_10 = 0x30000,
-
/* Interrupt disable mask for UFSHCI v1.1 */
- INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+ INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
/* Interrupt disable mask for UFSHCI v2.1 */
INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
@@ -425,13 +421,6 @@ union ufs_crypto_cfg_entry {
* Request Descriptor Definitions
*/
-/* Transfer request command type */
-enum {
- UTP_CMD_TYPE_SCSI = 0x0,
- UTP_CMD_TYPE_UFS = 0x1,
- UTP_CMD_TYPE_DEV_MANAGE = 0x2,
-};
-
/* To accommodate UFS2.0 required Command type */
enum {
UTP_CMD_TYPE_UFS_STORAGE = 0x1,