Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_lpss.c1
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/acobject.h1
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/evregion.c47
-rw-r--r--drivers/acpi/acpica/exfield.c67
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/container.c8
-rw-r--r--drivers/acpi/scan.c5
-rw-r--r--drivers/acpi/video.c8
-rw-r--r--drivers/base/regmap/Kconfig3
-rw-r--r--drivers/base/regmap/internal.h6
-rw-r--r--drivers/base/regmap/regcache.c13
-rw-r--r--drivers/base/regmap/regmap-debugfs.c8
-rw-r--r--drivers/base/regmap/regmap-i2c.c2
-rw-r--r--drivers/base/regmap/regmap-spi.c2
-rw-r--r--drivers/base/regmap/regmap.c86
-rw-r--r--drivers/block/paride/pcd.c4
-rw-r--r--drivers/bus/omap_l3_noc.h50
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/amd-rng.c4
-rw-r--r--drivers/char/hw_random/geode-rng.c4
-rw-r--r--drivers/char/hw_random/intel-rng.c13
-rw-r--r--drivers/char/hw_random/pasemi-rng.c2
-rw-r--r--drivers/char/hw_random/pseries-rng.c2
-rw-r--r--drivers/char/hw_random/via-rng.c8
-rw-r--r--drivers/char/hw_random/xgene-rng.c423
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c15
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c12
-rw-r--r--drivers/char/tile-srom.c13
-rw-r--r--drivers/cpufreq/cpufreq.c9
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c4
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/crypto/caam/caamhash.c28
-rw-r--r--drivers/crypto/caam/ctrl.c138
-rw-r--r--drivers/crypto/caam/intern.h9
-rw-r--r--drivers/crypto/caam/regs.h51
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h54
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c4
-rw-r--r--drivers/crypto/ccp/ccp-dev.c14
-rw-r--r--drivers/crypto/mv_cesa.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_internal.h2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c66
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h2
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_isr.c14
-rw-r--r--drivers/crypto/qce/dma.h2
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/dmaengine.c104
-rw-r--r--drivers/dma/dmatest.c4
-rw-r--r--drivers/dma/ioat/dca.c13
-rw-r--r--drivers/dma/ioat/dma.c3
-rw-r--r--drivers/dma/ioat/dma.h7
-rw-r--r--drivers/dma/ioat/dma_v2.c4
-rw-r--r--drivers/dma/ioat/dma_v3.c7
-rw-r--r--drivers/dma/iovlock.c280
-rw-r--r--drivers/dma/mv_xor.c80
-rw-r--r--drivers/dma/omap-dma.c5
-rw-r--r--drivers/edac/amd64_edac.c146
-rw-r--r--drivers/edac/amd64_edac.h5
-rw-r--r--drivers/edac/edac_core.h2
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/firmware/efi/Makefile2
-rw-r--r--drivers/gpio/gpiolib-acpi.c5
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c14
-rw-r--r--drivers/gpu/drm/i915/i915_params.c8
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h1
-rw-r--r--drivers/gpu/drm/radeon/cik.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c8
-rw-r--r--drivers/gpu/drm/radeon/r600.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c44
-rw-r--r--drivers/gpu/drm/radeon/si.c8
-rw-r--r--drivers/hid/Kconfig11
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-core.c7
-rw-r--r--drivers/hid/hid-holtek-mouse.c4
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-input.c6
-rw-r--r--drivers/hid/hid-logitech-dj.c35
-rw-r--r--drivers/hid/hid-penmount.c49
-rw-r--r--drivers/hid/hid-picolcd_core.c4
-rw-r--r--drivers/hid/hid-rmi.c44
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hid/hid-sony.c100
-rw-r--r--drivers/hid/hid-thingm.c7
-rw-r--r--drivers/hid/uhid.c394
-rw-r--r--drivers/hid/usbhid/hid-core.c60
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom.h6
-rw-r--r--drivers/hid/wacom_sys.c271
-rw-r--r--drivers/hid/wacom_wac.c312
-rw-r--r--drivers/hid/wacom_wac.h17
-rw-r--r--drivers/i2c/Makefile5
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-ismt.c4
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/i2c/busses/i2c-qup.c12
-rw-r--r--drivers/i2c/busses/i2c-rcar.c4
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c13
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c57
-rw-r--r--drivers/i2c/i2c-acpi.c364
-rw-r--r--drivers/i2c/i2c-core.c364
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c2
-rw-r--r--drivers/input/gameport/gameport.c41
-rw-r--r--drivers/input/joystick/analog.c71
-rw-r--r--drivers/input/joystick/xpad.c34
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c92
-rw-r--r--drivers/input/misc/Kconfig43
-rw-r--r--drivers/input/misc/Makefile4
-rw-r--r--drivers/input/misc/drv260x.c741
-rw-r--r--drivers/input/misc/drv2667.c500
-rw-r--r--drivers/input/misc/max77693-haptic.c357
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c332
-rw-r--r--drivers/input/misc/soc_button_array.c60
-rw-r--r--drivers/input/mouse/Makefile2
-rw-r--r--drivers/input/mouse/focaltech.c52
-rw-r--r--drivers/input/mouse/focaltech.h22
-rw-r--r--drivers/input/mouse/psmouse-base.c30
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mouse/synaptics.c17
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/iommu/amd_iommu_v2.c6
-rw-r--r--drivers/irqchip/Kconfig2
-rw-r--r--drivers/md/raid1.c40
-rw-r--r--drivers/md/raid5.c18
-rw-r--r--drivers/media/common/cx2341x.c1
-rw-r--r--drivers/media/dvb-frontends/cx24123.c1
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c2
-rw-r--r--drivers/media/radio/radio-miropcm20.c1
-rw-r--r--drivers/media/rc/keymaps/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c25
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c2
-rw-r--r--drivers/media/v4l2-core/Kconfig9
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c48
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c2
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/message/fusion/mptctl.c7
-rw-r--r--drivers/message/fusion/mptspi.c5
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mei/pci-txe.c4
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/cadence/macb.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c11
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/macvtap.c18
-rw-r--r--drivers/net/usb/r8152.c88
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c2
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c2
-rw-r--r--drivers/nfc/microread/microread.c16
-rw-r--r--drivers/nfc/st21nfca/Makefile5
-rw-r--r--drivers/nfc/st21nfcb/Makefile5
-rw-r--r--drivers/of/base.c16
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c14
-rw-r--r--drivers/of/platform.c7
-rw-r--r--drivers/parisc/superio.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/probe.c20
-rw-r--r--drivers/pinctrl/Kconfig103
-rw-r--r--drivers/pinctrl/Makefile23
-rw-r--r--drivers/pinctrl/berlin/berlin.c29
-rw-r--r--drivers/pinctrl/freescale/Kconfig108
-rw-r--r--drivers/pinctrl/freescale/Makefile19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c (renamed from drivers/pinctrl/pinctrl-imx.c)17
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h (renamed from drivers/pinctrl/pinctrl-imx.h)7
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c (renamed from drivers/pinctrl/pinctrl-imx1-core.c)8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c (renamed from drivers/pinctrl/pinctrl-imx1.c)0
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.h (renamed from drivers/pinctrl/pinctrl-imx1.h)0
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx21.c342
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx23.c (renamed from drivers/pinctrl/pinctrl-imx23.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c (renamed from drivers/pinctrl/pinctrl-imx25.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c (renamed from drivers/pinctrl/pinctrl-imx27.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx28.c (renamed from drivers/pinctrl/pinctrl-imx28.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx35.c (renamed from drivers/pinctrl/pinctrl-imx35.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c (renamed from drivers/pinctrl/pinctrl-imx50.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx51.c (renamed from drivers/pinctrl/pinctrl-imx51.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c (renamed from drivers/pinctrl/pinctrl-imx53.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c (renamed from drivers/pinctrl/pinctrl-imx6dl.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c (renamed from drivers/pinctrl/pinctrl-imx6q.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c (renamed from drivers/pinctrl/pinctrl-imx6sl.c)3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c (renamed from drivers/pinctrl/pinctrl-imx6sx.c)2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c (renamed from drivers/pinctrl/pinctrl-mxs.c)8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.h (renamed from drivers/pinctrl/pinctrl-mxs.h)0
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c (renamed from drivers/pinctrl/pinctrl-vf610.c)2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c99
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c142
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c7
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91.c212
-rw-r--r--drivers/pinctrl/pinctrl-bcm281xx.c8
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c8
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c5
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c6
-rw-r--r--drivers/pinctrl/pinctrl-single.c18
-rw-r--r--drivers/pinctrl/pinctrl-st.c7
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c4
-rw-r--r--drivers/pinctrl/pinctrl-tegra-xusb.c8
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c7
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c2
-rw-r--r--drivers/pinctrl/pinctrl-tegra124.c69
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c2
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c2
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c7
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c6
-rw-r--r--drivers/pinctrl/pinctrl-u300.c6
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/pinmux.c10
-rw-r--r--drivers/pinctrl/qcom/Kconfig8
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c9
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1245
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c49
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c7
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c7
-rw-r--r--drivers/pinctrl/sh-pfc/core.c10
-rw-r--r--drivers/pinctrl/sh-pfc/core.h1
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a73a4.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c23
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c6
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h1
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c129
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c173
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c72
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c8
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c8
-rw-r--r--drivers/platform/x86/Kconfig4
-rw-r--r--drivers/regulator/Kconfig83
-rw-r--r--drivers/regulator/Makefile9
-rw-r--r--drivers/regulator/as3711-regulator.c61
-rw-r--r--drivers/regulator/axp20x-regulator.c2
-rw-r--r--drivers/regulator/bcm590xx-regulator.c8
-rw-r--r--drivers/regulator/core.c210
-rw-r--r--drivers/regulator/da9211-regulator.c174
-rw-r--r--drivers/regulator/da9211-regulator.h7
-rw-r--r--drivers/regulator/fan53555.c195
-rw-r--r--drivers/regulator/hi6421-regulator.c634
-rw-r--r--drivers/regulator/internal.h14
-rw-r--r--drivers/regulator/isl9305.c207
-rw-r--r--drivers/regulator/ltc3589.c1
-rw-r--r--drivers/regulator/max1586.c81
-rw-r--r--drivers/regulator/max77802.c586
-rw-r--r--drivers/regulator/mc13892-regulator.c11
-rw-r--r--drivers/regulator/of_regulator.c51
-rw-r--r--drivers/regulator/pwm-regulator.c197
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c798
-rw-r--r--drivers/regulator/rk808-regulator.c381
-rw-r--r--drivers/regulator/rn5t618-regulator.c143
-rw-r--r--drivers/regulator/s2mpa01.c144
-rw-r--r--drivers/regulator/s2mps11.c269
-rw-r--r--drivers/regulator/sky81452-regulator.c130
-rw-r--r--drivers/regulator/st-pwm.c190
-rw-r--r--drivers/regulator/tps65023-regulator.c3
-rw-r--r--drivers/regulator/tps65217-regulator.c114
-rw-r--r--drivers/regulator/tps65910-regulator.c13
-rw-r--r--drivers/rtc/rtc-efi.c1
-rw-r--r--drivers/scsi/Kconfig11
-rw-r--r--drivers/scsi/aacraid/linit.c1
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic79xx2
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h146
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c120
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2326
-rw-r--r--drivers/scsi/be2iscsi/be.h2
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c40
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h24
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c31
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c42
-rw-r--r--drivers/scsi/be2iscsi/be_main.h8
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c15
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c15
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c3
-rw-r--r--drivers/scsi/csiostor/csio_hw.h2
-rw-r--r--drivers/scsi/csiostor/csio_isr.c24
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c59
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h3
-rw-r--r--drivers/scsi/dpt_i2o.c1
-rw-r--r--drivers/scsi/eata.c9
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c39
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c2
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c5
-rw-r--r--drivers/scsi/fnic/fnic_trace.c7
-rw-r--r--drivers/scsi/hpsa.c70
-rw-r--r--drivers/scsi/ipr.c10
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/iscsi_tcp.c10
-rw-r--r--drivers/scsi/libfc/fc_libfc.c4
-rw-r--r--drivers/scsi/libiscsi.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c53
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c225
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c247
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h161
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1099
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c327
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c439
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h105
-rw-r--r--drivers/scsi/mpt2sas/Kconfig2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h12
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h29
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h8
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h74
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h8
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h44
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_type.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c328
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h28
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c197
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c2
-rw-r--r--drivers/scsi/mpt3sas/Kconfig2
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h18
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h64
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h45
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c287
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h49
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debug.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c78
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h2
-rw-r--r--drivers/scsi/nsp32.c4
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c8
-rw-r--r--drivers/scsi/pmcraid.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c34
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h191
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c943
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c133
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c132
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c693
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h31
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c106
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c45
-rw-r--r--drivers/scsi/scsi.c12
-rw-r--r--drivers/scsi/scsi_debug.c136
-rw-r--r--drivers/scsi/scsi_error.c27
-rw-r--r--drivers/scsi/scsi_lib.c24
-rw-r--r--drivers/scsi/scsi_scan.c26
-rw-r--r--drivers/scsi/scsi_sysfs.c17
-rw-r--r--drivers/scsi/sd.c8
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c12
-rw-r--r--drivers/scsi/ufs/Kconfig2
-rw-r--r--drivers/scsi/ufs/ufs.h132
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c55
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c291
-rw-r--r--drivers/scsi/ufs/ufshcd.c2514
-rw-r--r--drivers/scsi/ufs/ufshcd.h280
-rw-r--r--drivers/scsi/ufs/ufshci.h9
-rw-r--r--drivers/scsi/ufs/unipro.h56
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c46
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-bcm53xx.c299
-rw-r--r--drivers/spi/spi-bcm53xx.h72
-rw-r--r--drivers/spi/spi-cadence.c1
-rw-r--r--drivers/spi/spi-clps711x.c34
-rw-r--r--drivers/spi/spi-davinci.c63
-rw-r--r--drivers/spi/spi-dw-mid.c65
-rw-r--r--drivers/spi/spi-dw-pci.c67
-rw-r--r--drivers/spi/spi-dw.c64
-rw-r--r--drivers/spi/spi-dw.h11
-rw-r--r--drivers/spi/spi-ep93xx.c1
-rw-r--r--drivers/spi/spi-fsl-cpm.c14
-rw-r--r--drivers/spi/spi-fsl-dspi.c22
-rw-r--r--drivers/spi/spi-fsl-espi.c12
-rw-r--r--drivers/spi/spi-fsl-lib.c6
-rw-r--r--drivers/spi/spi-fsl-spi.c22
-rw-r--r--drivers/spi/spi-imx.c286
-rw-r--r--drivers/spi/spi-mxs.c6
-rw-r--r--drivers/spi/spi-omap-100k.c4
-rw-r--r--drivers/spi/spi-orion.c121
-rw-r--r--drivers/spi/spi-pl022.c64
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c20
-rw-r--r--drivers/spi/spi-rockchip.c15
-rw-r--r--drivers/spi/spi-rspi.c55
-rw-r--r--drivers/spi/spi-sh-msiof.c51
-rw-r--r--drivers/spi/spi-sirf.c109
-rw-r--r--drivers/spi/spi-tegra114.c9
-rw-r--r--drivers/spi/spi-tegra20-sflash.c3
-rw-r--r--drivers/spi/spi-txx9.c2
-rw-r--r--drivers/spi/spi-xilinx.c1
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c1
-rw-r--r--drivers/spi/spi.c45
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/storage/usb.c8
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
485 files changed, 23346 insertions, 6680 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index fddc1e86f9d0..b0ea767c8696 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -419,7 +419,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev);
if (!IS_ERR_OR_NULL(pdev)) {
- device_enable_async_suspend(&pdev->dev);
return 1;
}
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 996fa1959eea..1f8b20496f32 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -15,8 +15,6 @@
#include <linux/ctype.h>
static const struct acpi_device_id acpi_pnp_device_ids[] = {
- /* soc_button_array */
- {"PNP0C40"},
/* pata_isapnp */
{"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
/* floppy */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 1f9aba5fb81f..2747279fbe3c 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -254,6 +254,7 @@ struct acpi_create_field_info {
u32 field_bit_position;
u32 field_bit_length;
u16 resource_length;
+ u16 pin_number_index;
u8 field_flags;
u8 attribute;
u8 field_type;
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 22fb6449d3d6..8abb393dafab 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -264,6 +264,7 @@ struct acpi_object_region_field {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
union acpi_operand_object *region_obj; /* Containing op_region object */
u8 *resource_buffer; /* resource_template for serial regions/fields */
+ u16 pin_number_index; /* Index relative to previous Connection/Template */
};
struct acpi_object_bank_field {
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 3661c8e90540..c57666196672 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
*/
info->resource_buffer = NULL;
info->connection_node = NULL;
+ info->pin_number_index = 0;
/*
* A Connection() is either an actual resource descriptor (buffer)
@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
}
info->field_bit_position += info->field_bit_length;
+ info->pin_number_index++; /* Index relative to previous Connection() */
break;
default:
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 9957297d1580..8eb8575e8c16 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
void *region_context = NULL;
struct acpi_connection_info *context;
+ acpi_physical_address address;
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* We have everything we need, we can invoke the address space handler */
handler = handler_desc->address_space.handler;
-
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
- &region_obj->region.handler->address_space, handler,
- ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
- region_offset),
- acpi_ut_get_region_name(region_obj->region.
- space_id)));
+ address = (region_obj->region.address + region_offset);
/*
* Special handling for generic_serial_bus and general_purpose_io:
* There are three extra parameters that must be passed to the
* handler via the context:
- * 1) Connection buffer, a resource template from Connection() op.
- * 2) Length of the above buffer.
- * 3) Actual access length from the access_as() op.
+ * 1) Connection buffer, a resource template from Connection() op
+ * 2) Length of the above buffer
+ * 3) Actual access length from the access_as() op
+ *
+ * In addition, for general_purpose_io, the Address and bit_width fields
+ * are defined as follows:
+ * 1) Address is the pin number index of the field (bit offset from
+ * the previous Connection)
+ * 2) bit_width is the actual bit length of the field (number of pins)
*/
- if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
- (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
context && field_obj) {
/* Get the Connection (resource_template) buffer */
@@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
context->length = field_obj->field.resource_length;
context->access_length = field_obj->field.access_length;
}
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+ context && field_obj) {
+
+ /* Get the Connection (resource_template) buffer */
+
+ context->connection = field_obj->field.resource_buffer;
+ context->length = field_obj->field.resource_length;
+ context->access_length = field_obj->field.access_length;
+ address = field_obj->field.pin_number_index;
+ bit_width = field_obj->field.bit_length;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+ &region_obj->region.handler->address_space, handler,
+ ACPI_FORMAT_NATIVE_UINT(address),
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
@@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Call the handler */
- status = handler(function,
- (region_obj->region.address + region_offset),
- bit_width, value, context,
+ status = handler(function, address, bit_width, value, context,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 6907ce0c704c..b994845ed359 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -253,6 +253,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
buffer = &buffer_desc->integer.value;
}
+ if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+ (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GPIO)) {
+ /*
+ * For GPIO (general_purpose_io), the Address will be the bit offset
+ * from the previous Connection() operator, making it effectively a
+ * pin number index. The bit_length is the length of the field, which
+ * is thus the number of pins.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "GPIO FieldRead [FROM]: Pin %u Bits %u\n",
+ obj_desc->field.pin_number_index,
+ obj_desc->field.bit_length));
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /* Perform the write */
+
+ status = acpi_ex_access_region(obj_desc, 0,
+ (u64 *)buffer, ACPI_READ);
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(buffer_desc);
+ } else {
+ *ret_buffer_desc = buffer_desc;
+ }
+ return_ACPI_STATUS(status);
+ }
+
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
obj_desc, obj_desc->common.type, buffer,
@@ -413,6 +444,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
*result_desc = buffer_desc;
return_ACPI_STATUS(status);
+ } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+ (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GPIO)) {
+ /*
+ * For GPIO (general_purpose_io), we will bypass the entire field
+ * mechanism and handoff the bit address and bit width directly to
+ * the handler. The Address will be the bit offset
+ * from the previous Connection() operator, making it effectively a
+ * pin number index. The bit_length is the length of the field, which
+ * is thus the number of pins.
+ */
+ if (source_desc->common.type != ACPI_TYPE_INTEGER) {
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n",
+ acpi_ut_get_type_name(source_desc->common.
+ type),
+ source_desc->common.type,
+ (u32)source_desc->integer.value,
+ obj_desc->field.pin_number_index,
+ obj_desc->field.bit_length));
+
+ buffer = &source_desc->integer.value;
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /* Perform the write */
+
+ status = acpi_ex_access_region(obj_desc, 0,
+ (u64 *)buffer, ACPI_WRITE);
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+ return_ACPI_STATUS(status);
}
/* Get a pointer to the data to be written */
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index ee3f872870bc..118e942005e5 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
obj_desc->field.resource_length = info->resource_length;
}
+ obj_desc->field.pin_number_index = info->pin_number_index;
+
/* Allow full data read from EC address space */
if ((obj_desc->field.region_obj->region.space_id ==
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 76f7cff64594..c8ead9f97375 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -99,6 +99,13 @@ static void container_device_detach(struct acpi_device *adev)
device_unregister(dev);
}
+static void container_device_online(struct acpi_device *adev)
+{
+ struct device *dev = acpi_driver_data(adev);
+
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
static struct acpi_scan_handler container_handler = {
.ids = container_device_ids,
.attach = container_device_attach,
@@ -106,6 +113,7 @@ static struct acpi_scan_handler container_handler = {
.hotplug = {
.enabled = true,
.demand_offline = true,
+ .notify_online = container_device_online,
},
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3bf7764659a4..ae44d8654c82 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -130,7 +130,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
count = snprintf(&modalias[len], size, "%s:", id->id);
if (count < 0)
- return EINVAL;
+ return -EINVAL;
if (count >= size)
return -ENOMEM;
len += count;
@@ -2189,6 +2189,9 @@ static void acpi_bus_attach(struct acpi_device *device)
ok:
list_for_each_entry(child, &device->children, node)
acpi_bus_attach(child);
+
+ if (device->handler && device->handler->hotplug.notify_online)
+ device->handler->hotplug.notify_online(device);
}
/**
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index fcbda105616e..8e7e18567ae6 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -750,6 +750,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
},
},
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "ThinkPad X201s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
+ },
+ },
/* The native backlight controls do not work on some older machines */
{
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 4251570610c9..8a3f51f7b1b9 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -11,12 +11,15 @@ config REGMAP
config REGMAP_I2C
tristate
+ depends on I2C
config REGMAP_SPI
tristate
+ depends on SPI
config REGMAP_SPMI
tristate
+ depends on SPMI
config REGMAP_MMIO
tristate
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index bfc90b8547f2..0da5865df5b1 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -49,8 +49,10 @@ struct regmap_async {
};
struct regmap {
- struct mutex mutex;
- spinlock_t spinlock;
+ union {
+ struct mutex mutex;
+ spinlock_t spinlock;
+ };
unsigned long spinlock_flags;
regmap_lock lock;
regmap_unlock unlock;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 5617da6dc898..f1280dc356d0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -269,8 +269,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
map->cache_bypass = 1;
ret = _regmap_write(map, reg, val);
map->cache_bypass = 0;
- if (ret)
+ if (ret) {
+ dev_err(map->dev, "Unable to sync register %#x. %d\n",
+ reg, ret);
return ret;
+ }
dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
}
@@ -615,8 +618,11 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
ret = _regmap_write(map, regtmp, val);
map->cache_bypass = 0;
- if (ret != 0)
+ if (ret != 0) {
+ dev_err(map->dev, "Unable to sync register %#x. %d\n",
+ regtmp, ret);
return ret;
+ }
dev_dbg(map->dev, "Synced register %#x, value %#x\n",
regtmp, val);
}
@@ -641,6 +647,9 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
map->cache_bypass = 1;
ret = _regmap_raw_write(map, base, *data, count * val_bytes);
+ if (ret)
+ dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
+ base, cur - map->reg_stride, ret);
map->cache_bypass = 0;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 0c94b661c16f..5799a0b9e6cc 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -473,6 +473,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
{
struct rb_node *next;
struct regmap_range_node *range_node;
+ const char *devname = "dummy";
/* If we don't have the debugfs root yet, postpone init */
if (!regmap_debugfs_root) {
@@ -491,12 +492,15 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
INIT_LIST_HEAD(&map->debugfs_off_cache);
mutex_init(&map->cache_lock);
+ if (map->dev)
+ devname = dev_name(map->dev);
+
if (name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
- dev_name(map->dev), name);
+ devname, name);
name = map->debugfs_name;
} else {
- name = dev_name(map->dev);
+ name = devname;
}
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index ca193d1ef47c..053150a7f9f2 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -168,6 +168,8 @@ static struct regmap_bus regmap_i2c = {
.write = regmap_i2c_write,
.gather_write = regmap_i2c_gather_write,
.read = regmap_i2c_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
};
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 0eb3097c0d76..53d1148e80a0 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -109,6 +109,8 @@ static struct regmap_bus regmap_spi = {
.async_alloc = regmap_spi_async_alloc,
.read = regmap_spi_read,
.read_flag_mask = 0x80,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
};
/**
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 1cf427bc0d4a..d2f8a818d200 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
@@ -448,6 +449,71 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
+static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
+ const struct regmap_config *config)
+{
+ enum regmap_endian endian;
+
+ /* Retrieve the endianness specification from the regmap config */
+ endian = config->reg_format_endian;
+
+ /* If the regmap config specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* Retrieve the endianness specification from the bus config */
+ if (bus && bus->reg_format_endian_default)
+ endian = bus->reg_format_endian_default;
+
+ /* If the bus specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* Use this if no other value was found */
+ return REGMAP_ENDIAN_BIG;
+}
+
+static enum regmap_endian regmap_get_val_endian(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config)
+{
+ struct device_node *np;
+ enum regmap_endian endian;
+
+ /* Retrieve the endianness specification from the regmap config */
+ endian = config->val_format_endian;
+
+ /* If the regmap config specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* If the dev and dev->of_node exist try to get endianness from DT */
+ if (dev && dev->of_node) {
+ np = dev->of_node;
+
+ /* Parse the device's DT node for an endianness specification */
+ if (of_property_read_bool(np, "big-endian"))
+ endian = REGMAP_ENDIAN_BIG;
+ else if (of_property_read_bool(np, "little-endian"))
+ endian = REGMAP_ENDIAN_LITTLE;
+
+ /* If the endianness was specified in DT, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+ }
+
+ /* Retrieve the endianness specification from the bus config */
+ if (bus && bus->val_format_endian_default)
+ endian = bus->val_format_endian_default;
+
+ /* If the bus specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* Use this if no other value was found */
+ return REGMAP_ENDIAN_BIG;
+}
+
/**
* regmap_init(): Initialise register map
*
@@ -551,17 +617,8 @@ struct regmap *regmap_init(struct device *dev,
map->reg_read = _regmap_bus_read;
}
- reg_endian = config->reg_format_endian;
- if (reg_endian == REGMAP_ENDIAN_DEFAULT)
- reg_endian = bus->reg_format_endian_default;
- if (reg_endian == REGMAP_ENDIAN_DEFAULT)
- reg_endian = REGMAP_ENDIAN_BIG;
-
- val_endian = config->val_format_endian;
- if (val_endian == REGMAP_ENDIAN_DEFAULT)
- val_endian = bus->val_format_endian_default;
- if (val_endian == REGMAP_ENDIAN_DEFAULT)
- val_endian = REGMAP_ENDIAN_BIG;
+ reg_endian = regmap_get_reg_endian(bus, config);
+ val_endian = regmap_get_val_endian(dev, bus, config);
switch (config->reg_bits + map->reg_shift) {
case 2:
@@ -1408,7 +1465,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
}
#ifdef LOG_DEVICE
- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
dev_info(map->dev, "%x <= %x\n", reg, val);
#endif
@@ -1659,6 +1716,9 @@ out:
} else {
void *wval;
+ if (!val_count)
+ return -EINVAL;
+
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
if (!wval) {
dev_err(map->dev, "Error in memory allocation\n");
@@ -2058,7 +2118,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
ret = map->reg_read(context, reg, val);
if (ret == 0) {
#ifdef LOG_DEVICE
- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 719cb1bc1640..3b7c9f1be484 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -69,8 +69,8 @@
nice This parameter controls the driver's use of
idle CPU time, at the expense of some speed.
- If this driver is built into the kernel, you can use kernel
- the following command line parameters, with the same values
+ If this driver is built into the kernel, you can use the
+ following kernel command line parameters, with the same values
as the corresponding module parameters listed above:
pcd.drive0
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index 551e01061434..95254585db86 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -188,31 +188,31 @@ static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
};
static struct l3_masters_data omap_l3_masters[] = {
- { 0x0 , "MPU"},
- { 0x10, "CS_ADP"},
- { 0x14, "xxx"},
- { 0x20, "DSP"},
- { 0x30, "IVAHD"},
- { 0x40, "ISS"},
- { 0x44, "DucatiM3"},
- { 0x48, "FaceDetect"},
- { 0x50, "SDMA_Rd"},
- { 0x54, "SDMA_Wr"},
- { 0x58, "xxx"},
- { 0x5C, "xxx"},
- { 0x60, "SGX"},
- { 0x70, "DSS"},
- { 0x80, "C2C"},
- { 0x88, "xxx"},
- { 0x8C, "xxx"},
- { 0x90, "HSI"},
- { 0xA0, "MMC1"},
- { 0xA4, "MMC2"},
- { 0xA8, "MMC6"},
- { 0xB0, "UNIPRO1"},
- { 0xC0, "USBHOSTHS"},
- { 0xC4, "USBOTGHS"},
- { 0xC8, "USBHOSTFS"}
+ { 0x00, "MPU"},
+ { 0x04, "CS_ADP"},
+ { 0x05, "xxx"},
+ { 0x08, "DSP"},
+ { 0x0C, "IVAHD"},
+ { 0x10, "ISS"},
+ { 0x11, "DucatiM3"},
+ { 0x12, "FaceDetect"},
+ { 0x14, "SDMA_Rd"},
+ { 0x15, "SDMA_Wr"},
+ { 0x16, "xxx"},
+ { 0x17, "xxx"},
+ { 0x18, "SGX"},
+ { 0x1C, "DSS"},
+ { 0x20, "C2C"},
+ { 0x22, "xxx"},
+ { 0x23, "xxx"},
+ { 0x24, "HSI"},
+ { 0x28, "MMC1"},
+ { 0x29, "MMC2"},
+ { 0x2A, "MMC6"},
+ { 0x2C, "UNIPRO1"},
+ { 0x30, "USBHOSTHS"},
+ { 0x31, "USBOTGHS"},
+ { 0x32, "USBHOSTFS"}
};
static struct l3_flagmux_data *omap_l3_flagmux[] = {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 836b061ced35..91a04ae8003c 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -333,6 +333,19 @@ config HW_RANDOM_MSM
If unsure, say Y.
+config HW_RANDOM_XGENE
+ tristate "APM X-Gene True Random Number Generator (TRNG) support"
+ depends on HW_RANDOM && ARCH_XGENE
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on APM X-Gene SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xgene_rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 199ed283e149..0b4cd57f4e24 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
+obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index c6af038682f1..48f6a83cdd61 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -142,10 +142,10 @@ found:
amd_rng.priv = (unsigned long)pmbase;
amd_pdev = pdev;
- printk(KERN_INFO "AMD768 RNG detected\n");
+ pr_info("AMD768 RNG detected\n");
err = hwrng_register(&amd_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
release_region(pmbase + 0xF0, 8);
goto out;
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 4c4d4e140f98..0d0579fe465e 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -109,10 +109,10 @@ found:
goto out;
geode_rng.priv = (unsigned long)mem;
- printk(KERN_INFO "AMD Geode RNG detected\n");
+ pr_info("AMD Geode RNG detected\n");
err = hwrng_register(&geode_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
goto err_unmap;
}
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 86fe45c19968..290c880266bf 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -199,7 +199,7 @@ static int intel_rng_init(struct hwrng *rng)
if ((hw_status & INTEL_RNG_ENABLED) == 0)
hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
if ((hw_status & INTEL_RNG_ENABLED) == 0) {
- printk(KERN_ERR PFX "cannot enable RNG, aborting\n");
+ pr_err(PFX "cannot enable RNG, aborting\n");
goto out;
}
err = 0;
@@ -216,7 +216,7 @@ static void intel_rng_cleanup(struct hwrng *rng)
if (hw_status & INTEL_RNG_ENABLED)
hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
else
- printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
+ pr_warn(PFX "unusual: RNG already disabled\n");
}
@@ -274,7 +274,7 @@ static int __init intel_rng_hw_init(void *_intel_rng_hw)
if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
(dvc != INTEL_FWH_DEVICE_CODE_8M &&
dvc != INTEL_FWH_DEVICE_CODE_4M)) {
- printk(KERN_NOTICE PFX "FWH not detected\n");
+ pr_notice(PFX "FWH not detected\n");
return -ENODEV;
}
@@ -306,7 +306,6 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
(BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
== BIOS_CNTL_LOCK_ENABLE_MASK) {
static __initdata /*const*/ char warning[] =
- KERN_WARNING
PFX "Firmware space is locked read-only. If you can't or\n"
PFX "don't want to disable this in firmware setup, and if\n"
PFX "you are certain that your system has a functional\n"
@@ -314,7 +313,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
if (no_fwh_detect)
return -ENODEV;
- printk(warning);
+ pr_warn("%s", warning);
return -EBUSY;
}
@@ -392,10 +391,10 @@ fwh_done:
goto out;
}
- printk(KERN_INFO "Intel 82802 RNG detected\n");
+ pr_info("Intel 82802 RNG detected\n");
err = hwrng_register(&intel_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
iounmap(mem);
}
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c66279bb6ef3..c0347d1dded0 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -113,7 +113,7 @@ static int rng_probe(struct platform_device *ofdev)
pasemi_rng.priv = (unsigned long)rng_regs;
- printk(KERN_INFO "Registering PA Semi RNG\n");
+ pr_info("Registering PA Semi RNG\n");
err = hwrng_register(&pasemi_rng);
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index ab7ffdec0ec3..6226aa08c36a 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -86,7 +86,7 @@ static struct vio_driver pseries_rng_driver = {
static int __init rng_init(void)
{
- printk(KERN_INFO "Registering IBM pSeries RNG driver\n");
+ pr_info("Registering IBM pSeries RNG driver\n");
return vio_register_driver(&pseries_rng_driver);
}
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index de5a6dcfb3e2..a3bebef255ad 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -141,7 +141,7 @@ static int via_rng_init(struct hwrng *rng)
* register */
if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
if (!cpu_has_xstore_enabled) {
- printk(KERN_ERR PFX "can't enable hardware RNG "
+ pr_err(PFX "can't enable hardware RNG "
"if XSTORE is not enabled\n");
return -ENODEV;
}
@@ -180,7 +180,7 @@ static int via_rng_init(struct hwrng *rng)
unneeded */
rdmsr(MSR_VIA_RNG, lo, hi);
if ((lo & VIA_RNG_ENABLE) == 0) {
- printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
+ pr_err(PFX "cannot enable VIA C3 RNG, aborting\n");
return -ENODEV;
}
@@ -202,10 +202,10 @@ static int __init mod_init(void)
if (!cpu_has_xstore)
return -ENODEV;
- printk(KERN_INFO "VIA RNG detected\n");
+ pr_info("VIA RNG detected\n");
err = hwrng_register(&via_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
goto out;
}
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
new file mode 100644
index 000000000000..23caa05380a8
--- /dev/null
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -0,0 +1,423 @@
+/*
+ * APM X-Gene SoC RNG Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Rameshwar Prasad Sahu <rsahu@apm.com>
+ * Shamal Winchurkar <swinchurkar@apm.com>
+ * Feng Kan <fkan@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/timer.h>
+
+#define RNG_MAX_DATUM 4
+#define MAX_TRY 100
+#define XGENE_RNG_RETRY_COUNT 20
+#define XGENE_RNG_RETRY_INTERVAL 10
+
+/* RNG Registers */
+#define RNG_INOUT_0 0x00
+#define RNG_INTR_STS_ACK 0x10
+#define RNG_CONTROL 0x14
+#define RNG_CONFIG 0x18
+#define RNG_ALARMCNT 0x1c
+#define RNG_FROENABLE 0x20
+#define RNG_FRODETUNE 0x24
+#define RNG_ALARMMASK 0x28
+#define RNG_ALARMSTOP 0x2c
+#define RNG_OPTIONS 0x78
+#define RNG_EIP_REV 0x7c
+
+#define MONOBIT_FAIL_MASK BIT(7)
+#define POKER_FAIL_MASK BIT(6)
+#define LONG_RUN_FAIL_MASK BIT(5)
+#define RUN_FAIL_MASK BIT(4)
+#define NOISE_FAIL_MASK BIT(3)
+#define STUCK_OUT_MASK BIT(2)
+#define SHUTDOWN_OFLO_MASK BIT(1)
+#define READY_MASK BIT(0)
+
+#define MAJOR_HW_REV_RD(src) (((src) & 0x0f000000) >> 24)
+#define MINOR_HW_REV_RD(src) (((src) & 0x00f00000) >> 20)
+#define HW_PATCH_LEVEL_RD(src) (((src) & 0x000f0000) >> 16)
+#define MAX_REFILL_CYCLES_SET(dst, src) \
+ ((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000))
+#define MIN_REFILL_CYCLES_SET(dst, src) \
+ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ALARM_THRESHOLD_SET(dst, src) \
+ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ENABLE_RNG_SET(dst, src) \
+ ((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10)))
+#define REGSPEC_TEST_MODE_SET(dst, src) \
+ ((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8)))
+#define MONOBIT_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7)))
+#define POKER_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6)))
+#define LONG_RUN_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5)))
+#define RUN_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4)))
+#define NOISE_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3)))
+#define STUCK_OUT_MASK_SET(dst, src) \
+ ((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2)))
+#define SHUTDOWN_OFLO_MASK_SET(dst, src) \
+ ((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1)))
+
+struct xgene_rng_dev {
+ u32 irq;
+ void __iomem *csr_base;
+ u32 revision;
+ u32 datum_size;
+ u32 failure_cnt; /* Failure count last minute */
+ unsigned long failure_ts;/* First failure timestamp */
+ struct timer_list failure_timer;
+ struct device *dev;
+ struct clk *clk;
+};
+
+static void xgene_rng_expired_timer(unsigned long arg)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg;
+
+ /* Clear failure counter as timer expired */
+ disable_irq(ctx->irq);
+ ctx->failure_cnt = 0;
+ del_timer(&ctx->failure_timer);
+ enable_irq(ctx->irq);
+}
+
+static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
+{
+ ctx->failure_timer.data = (unsigned long) ctx;
+ ctx->failure_timer.function = xgene_rng_expired_timer;
+ ctx->failure_timer.expires = jiffies + 120 * HZ;
+ add_timer(&ctx->failure_timer);
+}
+
+/*
+ * Initialize or reinit free running oscillators (FROs)
+ */
+static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
+{
+ writel(fro_val, ctx->csr_base + RNG_FRODETUNE);
+ writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);
+ writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);
+ writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);
+}
+
+static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
+{
+ u32 val;
+
+ val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+ if (val & MONOBIT_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds number of 1s after
+ * checking 20,000 bits (test T1 as specified in the
+ * AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val);
+ if (val & POKER_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds value in at least one
+ * of the 16 poker_count_X counters or an out of bounds sum
+ * of squares value after checking 20,000 bits (test T2 as
+ * specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test poker failure error 0x%08X\n", val);
+ if (val & LONG_RUN_FAIL_MASK)
+ /*
+ * LFSR detected a sequence of 34 identical bits
+ * (test T4 as specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test long run failure error 0x%08X\n", val);
+ if (val & RUN_FAIL_MASK)
+ /*
+ * LFSR detected an outof-bounds value for at least one
+ * of the running counters after checking 20,000 bits
+ * (test T3 as specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test run failure error 0x%08X\n", val);
+ if (val & NOISE_FAIL_MASK)
+ /* LFSR detected a sequence of 48 identical bits */
+ dev_err(ctx->dev, "noise failure error 0x%08X\n", val);
+ if (val & STUCK_OUT_MASK)
+ /*
+ * Detected output data registers generated same value twice
+ * in a row
+ */
+ dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val);
+
+ if (val & SHUTDOWN_OFLO_MASK) {
+ u32 frostopped;
+
+ /* FROs shut down after a second error event. Try recover. */
+ if (++ctx->failure_cnt == 1) {
+ /* 1st time, just recover */
+ ctx->failure_ts = jiffies;
+ frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+ xgene_rng_init_fro(ctx, frostopped);
+
+ /*
+ * We must start a timer to clear out this error
+ * in case the system timer wrap around
+ */
+ xgene_rng_start_timer(ctx);
+ } else {
+ /* 2nd time failure in lesser than 1 minute? */
+ if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
+ dev_err(ctx->dev,
+ "FRO shutdown failure error 0x%08X\n",
+ val);
+ } else {
+ /* 2nd time failure after 1 minutes, recover */
+ ctx->failure_ts = jiffies;
+ ctx->failure_cnt = 1;
+ /*
+ * We must start a timer to clear out this
+ * error in case the system timer wrap
+ * around
+ */
+ xgene_rng_start_timer(ctx);
+ }
+ frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+ xgene_rng_init_fro(ctx, frostopped);
+ }
+ }
+ /* Clear them all */
+ writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
+}
+
+static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+
+ /* RNG Alarm Counter overflow */
+ xgene_rng_chk_overflow(ctx);
+
+ return IRQ_HANDLED;
+}
+
+static int xgene_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+ u32 i, val = 0;
+
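+ /* Poll the status register until data is ready or the retry budget is spent */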
+ for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) {
+ val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+ if ((val & READY_MASK) || !wait)
+ break;
+ udelay(XGENE_RNG_RETRY_INTERVAL);
+ }
+
+ return (val & READY_MASK);
+}
+
+static int xgene_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+ int i;
+
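+ /* Read datum_size 32-bit words from the RNG output registers */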
+ for (i = 0; i < ctx->datum_size; i++)
+ data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);
+
+ /* Clear ready bit to start next transaction */
+ writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+ return ctx->datum_size << 2;
+}
+
+static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
+{
+ u32 val;
+
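+ /* Stop the RNG while it is being reprogrammed */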
+ writel(0x00000000, ctx->csr_base + RNG_CONTROL);
+
+ val = MAX_REFILL_CYCLES_SET(0, 10);
+ val = MIN_REFILL_CYCLES_SET(val, 10);
+ writel(val, ctx->csr_base + RNG_CONFIG);
+
+ val = ALARM_THRESHOLD_SET(0, 0xFF);
+ writel(val, ctx->csr_base + RNG_ALARMCNT);
+
+ xgene_rng_init_fro(ctx, 0);
+
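+ /* Acknowledge any stale alarm/ready status before enabling the tests */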
+ writel(MONOBIT_FAIL_MASK |
+ POKER_FAIL_MASK |
+ LONG_RUN_FAIL_MASK |
+ RUN_FAIL_MASK |
+ NOISE_FAIL_MASK |
+ STUCK_OUT_MASK |
+ SHUTDOWN_OFLO_MASK |
+ READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+ val = ENABLE_RNG_SET(0, 1);
+ val = MONOBIT_FAIL_MASK_SET(val, 1);
+ val = POKER_FAIL_MASK_SET(val, 1);
+ val = LONG_RUN_FAIL_MASK_SET(val, 1);
+ val = RUN_FAIL_MASK_SET(val, 1);
+ val = NOISE_FAIL_MASK_SET(val, 1);
+ val = STUCK_OUT_MASK_SET(val, 1);
+ val = SHUTDOWN_OFLO_MASK_SET(val, 1);
+ writel(val, ctx->csr_base + RNG_CONTROL);
+}
+
+static int xgene_rng_init(struct hwrng *rng)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+
+ ctx->failure_cnt = 0;
+ init_timer(&ctx->failure_timer);
+
+ ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
+
+ dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
+ MAJOR_HW_REV_RD(ctx->revision),
+ MINOR_HW_REV_RD(ctx->revision),
+ HW_PATCH_LEVEL_RD(ctx->revision));
+
+ dev_dbg(ctx->dev, "Options 0x%08X",
+ readl(ctx->csr_base + RNG_OPTIONS));
+
+ xgene_rng_init_internal(ctx);
+
+ ctx->datum_size = RNG_MAX_DATUM;
+
+ return 0;
+}
+
+static struct hwrng xgene_rng_func = {
+ .name = "xgene-rng",
+ .init = xgene_rng_init,
+ .data_present = xgene_rng_data_present,
+ .data_read = xgene_rng_data_read,
+};
+
+static int xgene_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xgene_rng_dev *ctx;
+ int rc = 0;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctx);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->csr_base))
+ return PTR_ERR(ctx->csr_base);
+
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return ctx->irq;
+ }
+
+ dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
+ ctx->csr_base, ctx->irq);
+
+ rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
+ dev_name(&pdev->dev), ctx);
+ if (rc) {
+ dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
+ return rc;
+ }
+
+ /* Enable IP clock */
+ ctx->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ctx->clk)) {
+ dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
+ } else {
+ rc = clk_prepare_enable(ctx->clk);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "clock prepare enable failed for RNG");
+ return rc;
+ }
+ }
+
+ xgene_rng_func.priv = (unsigned long) ctx;
+
+ rc = hwrng_register(&xgene_rng_func);
+ if (rc) {
+ dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
+ if (!IS_ERR(ctx->clk))
+ clk_disable_unprepare(ctx->clk);
+ return rc;
+ }
+
+ rc = device_init_wakeup(&pdev->dev, 1);
+ if (rc) {
+ dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
+ rc);
+ if (!IS_ERR(ctx->clk))
+ clk_disable_unprepare(ctx->clk);
+ hwrng_unregister(&xgene_rng_func);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int xgene_rng_remove(struct platform_device *pdev)
+{
+ struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
+ int rc;
+
+ rc = device_init_wakeup(&pdev->dev, 0);
+ if (rc)
+ dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
+ if (!IS_ERR(ctx->clk))
+ clk_disable_unprepare(ctx->clk);
+ hwrng_unregister(&xgene_rng_func);
+
+ return rc;
+}
+
+static const struct of_device_id xgene_rng_of_match[] = {
+ { .compatible = "apm,xgene-rng" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
+
+static struct platform_driver xgene_rng_driver = {
+ .probe = xgene_rng_probe,
+ .remove = xgene_rng_remove,
+ .driver = {
+ .name = "xgene-rng",
+ .of_match_table = xgene_rng_of_match,
+ },
+};
+
+module_platform_driver(xgene_rng_driver);
+MODULE_DESCRIPTION("APM X-Gene RNG driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e6db9381b2c7..f816211f062f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2796,7 +2796,6 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
= IPMI_CHANNEL_MEDIUM_IPMB;
intf->channels[0].protocol
= IPMI_CHANNEL_PROTOCOL_IPMB;
- rv = -ENOSYS;
intf->curr_channel = IPMI_MAX_CHANNELS;
wake_up(&intf->waitq);
@@ -2821,12 +2820,12 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
if (rv) {
/* Got an error somehow, just give up. */
+ printk(KERN_WARNING PFX
+ "Error sending channel information for channel"
+ " %d: %d\n", intf->curr_channel, rv);
+
intf->curr_channel = IPMI_MAX_CHANNELS;
wake_up(&intf->waitq);
-
- printk(KERN_WARNING PFX
- "Error sending channel information: %d\n",
- rv);
}
}
out:
@@ -2964,8 +2963,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
intf->null_user_handler = channel_handler;
intf->curr_channel = 0;
rv = send_channel_info_cmd(intf, 0);
- if (rv)
+ if (rv) {
+ printk(KERN_WARNING PFX
+ "Error sending channel information for channel"
+ " 0, %d\n", rv);
goto out;
+ }
/* Wait for the channel info to be read. */
wait_event(intf->waitq,
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5d665680ae33..5c4e1f625bbb 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -965,9 +965,9 @@ static inline int ipmi_si_is_busy(struct timespec *ts)
return ts->tv_nsec != -1;
}
-static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
- const struct smi_info *smi_info,
- struct timespec *busy_until)
+static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+ const struct smi_info *smi_info,
+ struct timespec *busy_until)
{
unsigned int max_busy_us = 0;
@@ -2658,6 +2658,9 @@ static int ipmi_probe(struct platform_device *dev)
if (!match)
return -EINVAL;
+ if (!of_device_is_available(np))
+ return -EINVAL;
+
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
dev_warn(&dev->dev, PFX "invalid address from OF\n");
@@ -3655,6 +3658,9 @@ static void cleanup_one_si(struct smi_info *to_clean)
if (!to_clean)
return;
+ if (to_clean->dev)
+ dev_set_drvdata(to_clean->dev, NULL);
+
list_del(&to_clean->link);
/* Tell the driver that we are shutting down. */
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index bd377472dcfb..02e76ac6d282 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -76,6 +76,7 @@ MODULE_LICENSE("GPL");
static int srom_devs; /* Number of SROM partitions */
static struct cdev srom_cdev;
+static struct platform_device *srom_parent;
static struct class *srom_class;
static struct srom_dev *srom_devices;
@@ -350,7 +351,7 @@ static int srom_setup_minor(struct srom_dev *srom, int index)
SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
return -EIO;
- dev = device_create(srom_class, &platform_bus,
+ dev = device_create(srom_class, &srom_parent->dev,
MKDEV(srom_major, index), srom, "%d", index);
return PTR_ERR_OR_ZERO(dev);
}
@@ -415,6 +416,13 @@ static int srom_init(void)
if (result < 0)
goto fail_chrdev;
+ /* Create a parent device */
+ srom_parent = platform_device_register_simple("srom", -1, NULL, 0);
+ if (IS_ERR(srom_parent)) {
+ result = PTR_ERR(srom_parent);
+ goto fail_pdev;
+ }
+
/* Create a sysfs class. */
srom_class = class_create(THIS_MODULE, "srom");
if (IS_ERR(srom_class)) {
@@ -438,6 +446,8 @@ fail_class:
device_destroy(srom_class, MKDEV(srom_major, i));
class_destroy(srom_class);
fail_cdev:
+ platform_device_unregister(srom_parent);
+fail_pdev:
cdev_del(&srom_cdev);
fail_chrdev:
unregister_chrdev_region(dev, srom_devs);
@@ -454,6 +464,7 @@ static void srom_cleanup(void)
device_destroy(srom_class, MKDEV(srom_major, i));
class_destroy(srom_class);
cdev_del(&srom_cdev);
+ platform_device_unregister(srom_parent);
unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
kfree(srom_devices);
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d9fdeddcef96..61190f6b4829 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1289,6 +1289,8 @@ err_get_freq:
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ up_write(&policy->rwsem);
+
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
@@ -1657,7 +1659,7 @@ void cpufreq_suspend(void)
return;
if (!has_target())
- return;
+ goto suspend;
pr_debug("%s: Suspending Governors\n", __func__);
@@ -1671,6 +1673,7 @@ void cpufreq_suspend(void)
policy);
}
+suspend:
cpufreq_suspended = true;
}
@@ -1687,13 +1690,13 @@ void cpufreq_resume(void)
if (!cpufreq_driver)
return;
+ cpufreq_suspended = false;
+
if (!has_target())
return;
pr_debug("%s: Resuming Governors\n", __func__);
- cpufreq_suspended = false;
-
list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
pr_err("%s: Failed to resume driver: %p\n", __func__,
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index c1320528b9d0..6bd69adc3c5e 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
return cpufreq_register_driver(&integrator_driver);
}
-static void __exit integrator_cpufreq_remove(struct platform_device *pdev)
+static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
{
- cpufreq_unregister_driver(&integrator_driver);
+ return cpufreq_unregister_driver(&integrator_driver);
}
static const struct of_device_id integrator_cpufreq_match[] = {
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 728a2d879499..4d2c8e861089 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
u32 input_buffer;
int cpu;
- spin_lock(&pcc_lock);
cpu = policy->cpu;
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
@@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
+ spin_lock(&pcc_lock);
input_buffer = 0x1 | (((target_freq * 100)
/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index b464d03ebf40..f347ab7eea95 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -836,8 +836,9 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->sec4_sg + sec4_sg_src_index,
chained);
if (*next_buflen) {
- sg_copy_part(next_buf, req->src, to_hash -
- *buflen, req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
state->current_buf = !state->current_buf;
}
} else {
@@ -878,7 +879,8 @@ static int ahash_update_ctx(struct ahash_request *req)
kfree(edesc);
}
} else if (*next_buflen) {
- sg_copy(buf + *buflen, req->src, req->nbytes);
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
*buflen = *next_buflen;
*next_buflen = last_buflen;
}
@@ -1262,8 +1264,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + 1, chained);
if (*next_buflen) {
- sg_copy_part(next_buf, req->src, to_hash - *buflen,
- req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
state->current_buf = !state->current_buf;
}
@@ -1304,7 +1307,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
kfree(edesc);
}
} else if (*next_buflen) {
- sg_copy(buf + *buflen, req->src, req->nbytes);
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
*buflen = *next_buflen;
*next_buflen = 0;
}
@@ -1413,9 +1417,9 @@ static int ahash_update_first(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = state->buf_0 + state->current_buf *
- CAAM_MAX_HASH_BLOCK_SIZE;
- int *next_buflen = &state->buflen_0 + state->current_buf;
+ u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *next_buflen = state->current_buf ?
+ &state->buflen_1 : &state->buflen_0;
int to_hash;
u32 *sh_desc = ctx->sh_desc_update_first, *desc;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
@@ -1476,7 +1480,8 @@ static int ahash_update_first(struct ahash_request *req)
}
if (*next_buflen)
- sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
@@ -1511,7 +1516,8 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- sg_copy(next_buf, req->src, req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 3cade79ea41e..31000c8c4a90 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,5 +1,4 @@
-/*
- * CAAM control-plane driver backend
+/* * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
@@ -81,38 +80,37 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
u32 *status)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ struct caam_deco __iomem *deco = ctrlpriv->deco;
unsigned int timeout = 100000;
u32 deco_dbg_reg, flags;
int i;
- /* Set the bit to request direct access to DECO0 */
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
if (ctrlpriv->virt_en == 1) {
- setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+ setbits32(&ctrl->deco_rsr, DECORSR_JR0);
- while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+ while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
--timeout)
cpu_relax();
timeout = 100000;
}
- setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
- while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+ while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
--timeout)
cpu_relax();
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
- clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
- wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+ wr_reg32(&deco->descbuf[i], *(desc + i));
flags = DECO_JQCR_WHL;
/*
@@ -123,11 +121,11 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
flags |= DECO_JQCR_FOUR;
/* Instruct the DECO to execute it */
- wr_reg32(&topregs->deco.jr_ctl_hi, flags);
+ wr_reg32(&deco->jr_ctl_hi, flags);
timeout = 10000000;
do {
- deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+ deco_dbg_reg = rd_reg32(&deco->desc_dbg);
/*
* If an error occured in the descriptor, then
* the DECO status field will be set to 0x0D
@@ -138,14 +136,14 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
cpu_relax();
} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
- *status = rd_reg32(&topregs->deco.op_status_hi) &
+ *status = rd_reg32(&deco->op_status_hi) &
DECO_OP_STATUS_HI_ERR_MASK;
if (ctrlpriv->virt_en == 1)
- clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+ clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
/* Mark the DECO as free */
- clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
if (!timeout)
return -EAGAIN;
@@ -176,13 +174,13 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int gen_sk)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
u32 *desc, status, rdsta_val;
int ret = 0, sh_idx;
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
- r4tst = &topregs->ctrl.r4tst[0];
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+ r4tst = &ctrl->r4tst[0];
desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
@@ -212,12 +210,11 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* CAAM eras), then try again.
*/
rdsta_val =
- rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+ rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if (status || !(rdsta_val & (1 << sh_idx)))
ret = -EAGAIN;
if (ret)
break;
-
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
@@ -285,12 +282,12 @@ static int caam_remove(struct platform_device *pdev)
{
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl;
int ring, ret = 0;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
/* Remove platform devices for JobRs */
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
@@ -308,7 +305,7 @@ static int caam_remove(struct platform_device *pdev)
#endif
/* Unmap controller region */
- iounmap(&topregs->ctrl);
+ iounmap(&ctrl);
return ret;
}
@@ -323,12 +320,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
{
struct device *ctrldev = &pdev->dev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
u32 val;
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
- r4tst = &topregs->ctrl.r4tst[0];
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+ r4tst = &ctrl->r4tst[0];
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
@@ -355,10 +352,19 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtsdctl, val);
/* min. freq. count, equal to 1/4 of the entropy sample length */
wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
- /* max. freq. count, equal to 8 times the entropy sample length */
- wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
+ /* disable maximum frequency count */
+ wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+ /* read the control register */
+ val = rd_reg32(&r4tst->rtmctl);
+ /*
+ * select raw sampling in both entropy shifter
+ * and statistical checker
+ */
+ setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
/* put RNG4 into run mode */
- clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ clrbits32(&val, RTMCTL_PRGM);
+ /* write back the control register */
+ wr_reg32(&r4tst->rtmctl, val);
}
/**
@@ -387,13 +393,14 @@ static int caam_probe(struct platform_device *pdev)
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
- struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
u32 scfgr, comp_params;
u32 cha_vid_ls;
+ int pg_size;
+ int BLOCK_OFFSET = 0;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
GFP_KERNEL);
@@ -412,10 +419,27 @@ static int caam_probe(struct platform_device *pdev)
dev_err(dev, "caam: of_iomap() failed\n");
return -ENOMEM;
}
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+ /* Find the page size by reading the CTPR_MS register */
+ comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
- /* topregs used to derive pointers to CAAM sub-blocks only */
- topregs = (struct caam_full __iomem *)ctrl;
+ /* Select BLOCK_OFFSET based on the page size supported by
+ * the platform
+ */
+ if (pg_size == 0)
+ BLOCK_OFFSET = PG_SIZE_4K;
+ else
+ BLOCK_OFFSET = PG_SIZE_64K;
+
+ ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+ );
+ ctrlpriv->deco = (struct caam_deco __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * DECO_BLOCK_NUMBER
+ );
/* Get the IRQ of the controller (for security violations only) */
ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
@@ -424,15 +448,14 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
+ setbits32(&ctrl->mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time paramters and SCFGR to determine
* if Virtualization is enabled for this platform
*/
- comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
- scfgr = rd_reg32(&topregs->ctrl.scfgr);
+ scfgr = rd_reg32(&ctrl->scfgr);
ctrlpriv->virt_en = 0;
if (comp_params & CTPR_MS_VIRT_EN_INCL) {
@@ -450,7 +473,7 @@ static int caam_probe(struct platform_device *pdev)
}
if (ctrlpriv->virt_en == 1)
- setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+ setbits32(&ctrl->jrstart, JRSTART_JR0_START |
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
@@ -477,7 +500,7 @@ static int caam_probe(struct platform_device *pdev)
sizeof(struct platform_device *) * rspec,
GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
- iounmap(&topregs->ctrl);
+ iounmap(&ctrl);
return -ENOMEM;
}
@@ -493,18 +516,26 @@ static int caam_probe(struct platform_device *pdev)
ring);
continue;
}
+ ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
+ ((uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
ctrlpriv->total_jobrs++;
ring++;
- }
+ }
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present =
- !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+ !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
+ ctrlpriv->qi = (struct caam_queue_if __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * QI_BLOCK_NUMBER
+ );
/* This is all that's required to physically enable QI */
- wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
+ wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
}
/* If no QI and no rings specified, quit and go home */
@@ -514,7 +545,7 @@ static int caam_probe(struct platform_device *pdev)
return -ENOMEM;
}
- cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
+ cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
@@ -522,7 +553,7 @@ static int caam_probe(struct platform_device *pdev)
*/
if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
- rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+ rd_reg32(&ctrl->r4tst[0].rdsta);
/*
* If the secure keys (TDKEK, JDKEK, TDSK), were already
* generated, signal this to the function that is instantiating
@@ -533,7 +564,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
do {
int inst_handles =
- rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+ rd_reg32(&ctrl->r4tst[0].rdsta) &
RDSTA_IFMASK;
/*
* If either SH were instantiated by somebody else
@@ -544,6 +575,9 @@ static int caam_probe(struct platform_device *pdev)
* the TRNG parameters.
*/
if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ dev_info(dev,
+ "Entropy delay = %u\n",
+ ent_delay);
kick_trng(pdev, ent_delay);
ent_delay += 400;
}
@@ -556,6 +590,12 @@ static int caam_probe(struct platform_device *pdev)
*/
ret = instantiate_rng(dev, inst_handles,
gen_sk);
+ if (ret == -EAGAIN)
+ /*
+ * if here, the loop will rerun,
+ * so don't hog the CPU
+ */
+ cpu_relax();
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
@@ -569,13 +609,13 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
- setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
+ setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
- caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
- (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
+ caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+ (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 97363db4e56e..89b94cc9e7a2 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -70,10 +70,11 @@ struct caam_drv_private {
struct platform_device *pdev;
/* Physical-presence section */
- struct caam_ctrl *ctrl; /* controller region */
- struct caam_deco **deco; /* DECO/CCB views */
- struct caam_assurance *ac;
- struct caam_queue_if *qi; /* QI control region */
+ struct caam_ctrl __iomem *ctrl; /* controller region */
+ struct caam_deco __iomem *deco; /* DECO/CCB views */
+ struct caam_assurance __iomem *assure;
+ struct caam_queue_if __iomem *qi; /* QI control region */
+ struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
/*
* Detected geometry block. Filled in from device tree if powerpc,
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index f48e344ffc39..378ddc17f60e 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -194,6 +194,8 @@ struct caam_perfmon {
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
+#define CTPR_MS_PG_SZ_MASK 0x10
+#define CTPR_MS_PG_SZ_SHIFT 4
u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
u64 rsvd1[2];
@@ -269,6 +271,16 @@ struct rngtst {
/* RNG4 TRNG test registers */
struct rng4tst {
#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
+ both entropy shifter and
+ statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC 1 /* use raw data in both
+ entropy shifter and
+ statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2 /* use von Neumann data in
+ entropy shifter, raw data
+ in statistical checker */
+#define RTMCTL_SAMP_MODE_INVALID 3 /* invalid combination */
u32 rtmctl; /* misc. control register */
u32 rtscmisc; /* statistical check misc. register */
u32 rtpkrrng; /* poker range register */
@@ -278,7 +290,7 @@ struct rng4tst {
};
#define RTSDCTL_ENT_DLY_SHIFT 16
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
-#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MIN 3200
#define RTSDCTL_ENT_DLY_MAX 12800
u32 rtsdctl; /* seed control register */
union {
@@ -286,6 +298,7 @@ struct rng4tst {
u32 rttotsam; /* PRGM=0: total samples register */
};
u32 rtfrqmin; /* frequency count min. limit register */
+#define RTFRQMAX_DISABLE (1 << 20)
union {
u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
u32 rtfrqcnt; /* PRGM=0: freq. count register */
@@ -758,34 +771,10 @@ struct caam_deco {
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
-/*
- * Current top-level view of memory map is:
- *
- * 0x0000 - 0x0fff - CAAM Top-Level Control
- * 0x1000 - 0x1fff - Job Ring 0
- * 0x2000 - 0x2fff - Job Ring 1
- * 0x3000 - 0x3fff - Job Ring 2
- * 0x4000 - 0x4fff - Job Ring 3
- * 0x5000 - 0x5fff - (unused)
- * 0x6000 - 0x6fff - Assurance Controller
- * 0x7000 - 0x7fff - Queue Interface
- * 0x8000 - 0x8fff - DECO-CCB 0
- * 0x9000 - 0x9fff - DECO-CCB 1
- * 0xa000 - 0xafff - DECO-CCB 2
- * 0xb000 - 0xbfff - DECO-CCB 3
- * 0xc000 - 0xcfff - DECO-CCB 4
- *
- * caam_full describes the full register view of CAAM if useful,
- * although many configurations may choose to implement parts of
- * the register map separately, in differing privilege regions
- */
-struct caam_full {
- struct caam_ctrl __iomem ctrl;
- struct caam_job_ring jr[4];
- u64 rsvd[512];
- struct caam_assurance assure;
- struct caam_queue_if qi;
- struct caam_deco deco;
-};
-
+#define JR_BLOCK_NUMBER 1
+#define ASSURE_BLOCK_NUMBER 6
+#define QI_BLOCK_NUMBER 7
+#define DECO_BLOCK_NUMBER 8
+#define PG_SIZE_4K 0x1000
+#define PG_SIZE_64K 0x10000
#endif /* REGS_H */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b12ff85f4241..ce28a563effc 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -116,57 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
}
return nents;
}
-
-/* Map SG page in kernel virtual address space and copy */
-static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
- int len, int offset)
-{
- u8 *mapped_addr;
-
- /*
- * Page here can be user-space pinned using get_user_pages
- * Same must be kmapped before use and kunmapped subsequently
- */
- mapped_addr = kmap_atomic(sg_page(sg));
- memcpy(dest, mapped_addr + offset, len);
- kunmap_atomic(mapped_addr);
-}
-
-/* Copy from len bytes of sg to dest, starting from beginning */
-static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
-{
- struct scatterlist *current_sg = sg;
- int cpy_index = 0, next_cpy_index = current_sg->length;
-
- while (next_cpy_index < len) {
- sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
- current_sg->offset);
- current_sg = scatterwalk_sg_next(current_sg);
- cpy_index = next_cpy_index;
- next_cpy_index += current_sg->length;
- }
- if (cpy_index < len)
- sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
- current_sg->offset);
-}
-
-/* Copy sg data, from to_skip to end, to dest */
-static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
- int to_skip, unsigned int end)
-{
- struct scatterlist *current_sg = sg;
- int sg_index, cpy_index, offset;
-
- sg_index = current_sg->length;
- while (sg_index <= to_skip) {
- current_sg = scatterwalk_sg_next(current_sg);
- sg_index += current_sg->length;
- }
- cpy_index = sg_index - to_skip;
- offset = current_sg->offset + current_sg->length - cpy_index;
- sg_map_copy(dest, current_sg, cpy_index, offset);
- if (end - sg_index) {
- current_sg = scatterwalk_sg_next(current_sg);
- sg_copy(dest + cpy_index, current_sg, end - sg_index);
- }
-}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 20dc848481e7..4d4e016d755b 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -367,6 +367,10 @@ static int ccp_crypto_init(void)
{
int ret;
+ ret = ccp_present();
+ if (ret)
+ return ret;
+
spin_lock_init(&req_queue_lock);
INIT_LIST_HEAD(&req_queue.cmds);
req_queue.backlog = &req_queue.cmds;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index a7d110652a74..c6e6171eb6d3 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -55,6 +55,20 @@ static inline void ccp_del_device(struct ccp_device *ccp)
}
/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void)
+{
+ if (ccp_get_device())
+ return 0;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccp_present);
+
+/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index 08fcb1116d90..9249d3ed184b 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,4 +1,5 @@
#ifndef __MV_CRYPTO_H__
+#define __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
#define DIGEST_INITIAL_VAL_B 0xdd04
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index d97069b8a8e4..244d73378f0e 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -111,7 +111,7 @@ static int adf_chr_drv_create(void)
drv_device = device_create(adt_ctl_drv.drv_class, NULL,
MKDEV(adt_ctl_drv.major, 0),
NULL, DEVICE_NAME);
- if (!drv_device) {
+ if (IS_ERR(drv_device)) {
pr_err("QAT: failed to create device\n");
goto err_cdev_del;
}
@@ -436,7 +436,7 @@ static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
break;
default:
- pr_err("QAT: Invalid ioclt\n");
+ pr_err("QAT: Invalid ioctl\n");
ret = -EFAULT;
break;
}
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index f854bac276b0..c40546079981 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -75,7 +75,7 @@ struct adf_etr_ring_data {
struct adf_etr_bank_data {
struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
- struct tasklet_struct resp_hanlder;
+ struct tasklet_struct resp_handler;
void __iomem *csr_addr;
struct adf_accel_dev *accel_dev;
uint32_t irq_coalesc_timer;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59df48872955..3e26fa2b293f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -105,7 +105,7 @@ struct qat_alg_cd {
#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
struct qat_auth_state {
- uint8_t data[MAX_AUTH_STATE_SIZE];
+ uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);
struct qat_alg_session_ctx {
@@ -113,10 +113,6 @@ struct qat_alg_session_ctx {
dma_addr_t enc_cd_paddr;
struct qat_alg_cd *dec_cd;
dma_addr_t dec_cd_paddr;
- struct qat_auth_state *auth_hw_state_enc;
- dma_addr_t auth_state_enc_paddr;
- struct qat_auth_state *auth_hw_state_dec;
- dma_addr_t auth_state_dec_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
struct qat_crypto_instance *inst;
@@ -150,8 +146,9 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_session_ctx *ctx,
const uint8_t *auth_key,
- unsigned int auth_keylen, uint8_t *auth_state)
+ unsigned int auth_keylen)
{
+ struct qat_auth_state auth_state;
struct {
struct shash_desc shash;
char ctx[crypto_shash_descsize(ctx->hash_tfm)];
@@ -161,12 +158,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- uint8_t *ipad = auth_state;
+ uint8_t *ipad = auth_state.data;
uint8_t *opad = ipad + block_size;
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
+ memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
desc.shash.tfm = ctx->hash_tfm;
desc.shash.flags = 0x0;
@@ -298,10 +296,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
void *ptr = &req_tmpl->cd_ctrl;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
- struct icp_qat_fw_la_auth_req_params *auth_param =
- (struct icp_qat_fw_la_auth_req_params *)
- ((char *)&req_tmpl->serv_specif_rqpars +
- sizeof(struct icp_qat_fw_la_cipher_req_params));
/* CD setup */
cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
@@ -312,8 +306,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
hash->sha.inner_setup.auth_counter.counter =
cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
- if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
- (uint8_t *)ctx->auth_hw_state_enc))
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
/* Request setup */
@@ -359,9 +352,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
- auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
- sizeof(struct icp_qat_hw_auth_counter) +
- round_up(hash_cd_ctrl->inner_state1_sz, 8);
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
return 0;
@@ -399,8 +389,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
hash->sha.inner_setup.auth_counter.counter =
cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
- if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
- (uint8_t *)ctx->auth_hw_state_dec))
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
/* Request setup */
@@ -450,9 +439,6 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
- auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
- sizeof(struct icp_qat_hw_auth_counter) +
- round_up(hash_cd_ctrl->inner_state1_sz, 8);
auth_param->auth_res_sz = digestsize;
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
@@ -512,10 +498,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
dev = &GET_DEV(ctx->inst->accel_dev);
memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
- memset(ctx->auth_hw_state_enc, 0,
- sizeof(struct qat_auth_state));
- memset(ctx->auth_hw_state_dec, 0,
- sizeof(struct qat_auth_state));
memset(&ctx->enc_fw_req_tmpl, 0,
sizeof(struct icp_qat_fw_la_bulk_req));
memset(&ctx->dec_fw_req_tmpl, 0,
@@ -548,22 +530,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
spin_unlock(&ctx->lock);
goto out_free_enc;
}
- ctx->auth_hw_state_enc =
- dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
- &ctx->auth_state_enc_paddr,
- GFP_ATOMIC);
- if (!ctx->auth_hw_state_enc) {
- spin_unlock(&ctx->lock);
- goto out_free_dec;
- }
- ctx->auth_hw_state_dec =
- dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
- &ctx->auth_state_dec_paddr,
- GFP_ATOMIC);
- if (!ctx->auth_hw_state_dec) {
- spin_unlock(&ctx->lock);
- goto out_free_auth_enc;
- }
}
spin_unlock(&ctx->lock);
if (qat_alg_init_sessions(ctx, key, keylen))
@@ -572,14 +538,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
return 0;
out_free_all:
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
- ctx->auth_hw_state_dec = NULL;
-out_free_auth_enc:
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
- ctx->auth_hw_state_enc = NULL;
-out_free_dec:
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
@@ -924,16 +882,6 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
if (ctx->dec_cd)
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
- if (ctx->auth_hw_state_enc)
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_enc,
- ctx->auth_state_enc_paddr);
-
- if (ctx->auth_hw_state_dec)
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_dec,
- ctx->auth_state_dec_paddr);
-
qat_crypto_put_instance(inst);
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index b707f292b377..65dd1ff93d3b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -66,7 +66,7 @@
#define ADF_DH895XCC_ETR_MAX_BANKS 32
#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
-#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
#define ADF_DH895XCC_SMIA1_MASK 0x1
/* Error detection and correction */
#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d4172dedf775..67ec61e51185 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -70,9 +70,9 @@ static int adf_enable_msix(struct adf_accel_dev *accel_dev)
for (i = 0; i < msix_num_entries; i++)
pci_dev_info->msix_entries.entries[i].entry = i;
- if (pci_enable_msix(pci_dev_info->pci_dev,
- pci_dev_info->msix_entries.entries,
- msix_num_entries)) {
+ if (pci_enable_msix_exact(pci_dev_info->pci_dev,
+ pci_dev_info->msix_entries.entries,
+ msix_num_entries)) {
pr_err("QAT: Failed to enable MSIX IRQ\n");
return -EFAULT;
}
@@ -89,7 +89,7 @@ static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
struct adf_etr_bank_data *bank = bank_ptr;
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
- tasklet_hi_schedule(&bank->resp_hanlder);
+ tasklet_hi_schedule(&bank->resp_handler);
return IRQ_HANDLED;
}
@@ -217,7 +217,7 @@ static int adf_setup_bh(struct adf_accel_dev *accel_dev)
int i;
for (i = 0; i < hw_data->num_banks; i++)
- tasklet_init(&priv_data->banks[i].resp_hanlder,
+ tasklet_init(&priv_data->banks[i].resp_handler,
adf_response_handler,
(unsigned long)&priv_data->banks[i]);
return 0;
@@ -230,8 +230,8 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
int i;
for (i = 0; i < hw_data->num_banks; i++) {
- tasklet_disable(&priv_data->banks[i].resp_hanlder);
- tasklet_kill(&priv_data->banks[i].resp_hanlder);
+ tasklet_disable(&priv_data->banks[i].resp_handler);
+ tasklet_kill(&priv_data->banks[i].resp_handler);
}
}
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 805e378d59e9..65bedb81de0b 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -14,6 +14,8 @@
#ifndef _DMA_H_
#define _DMA_H_
+#include <linux/dmaengine.h>
+
/* maximum data transfer block size between BAM and CE */
#define QCE_BAM_BURST_SIZE 64
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9b1ea0ef59af..a016490c95ae 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -427,18 +427,6 @@ config DMA_OF
comment "DMA Clients"
depends on DMA_ENGINE
-config NET_DMA
- bool "Network: TCP receive copy offload"
- depends on DMA_ENGINE && NET
- default (INTEL_IOATDMA || FSL_DMA)
- depends on BROKEN
- help
- This enables the use of DMA engines in the network stack to
- offload receive copy-to-user operations, freeing CPU cycles.
-
- Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
- say N.
-
config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c6adb925f0b9..cb626c179911 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
obj-$(CONFIG_DMA_OF) += of-dma.o
-obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d5d30ed863ce..24bfaf0b92ba 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
- unsigned int dest_off, struct page *src_pg, unsigned int src_off,
- size_t len)
-{
- struct dma_device *dev = chan->device;
- struct dma_async_tx_descriptor *tx;
- struct dmaengine_unmap_data *unmap;
- dma_cookie_t cookie;
- unsigned long flags;
-
- unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
- if (!unmap)
- return -ENOMEM;
-
- unmap->to_cnt = 1;
- unmap->from_cnt = 1;
- unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
- DMA_TO_DEVICE);
- unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
- DMA_FROM_DEVICE);
- unmap->len = len;
- flags = DMA_CTRL_ACK;
- tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
- len, flags);
-
- if (!tx) {
- dmaengine_unmap_put(unmap);
- return -ENOMEM;
- }
-
- dma_set_unmap(tx, unmap);
- cookie = tx->tx_submit(tx);
- dmaengine_unmap_put(unmap);
-
- preempt_disable();
- __this_cpu_add(chan->local->bytes_transferred, len);
- __this_cpu_inc(chan->local->memcpy_count);
- preempt_enable();
-
- return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
- void *src, size_t len)
-{
- return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
- (unsigned long) dest & ~PAGE_MASK,
- virt_to_page(src),
- (unsigned long) src & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
- unsigned int offset, void *kdata, size_t len)
-{
- return dma_async_memcpy_pg_to_pg(chan, page, offset,
- virt_to_page(kdata),
- (unsigned long) kdata & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e27cec25c59e..a8d7809e2f4c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -688,14 +688,14 @@ static int dmatest_func(void *data)
runtime = ktime_us_delta(ktime_get(), ktime);
ret = 0;
+err_dstbuf:
for (i = 0; thread->dsts[i]; i++)
kfree(thread->dsts[i]);
-err_dstbuf:
kfree(thread->dsts);
err_dsts:
+err_srcbuf:
for (i = 0; thread->srcs[i]; i++)
kfree(thread->srcs[i]);
-err_srcbuf:
kfree(thread->srcs);
err_srcs:
kfree(pq_coefs);
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9e84d5bc9307..3b55bb8d969a 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -35,6 +35,7 @@
#include "dma.h"
#include "registers.h"
+#include "dma_v2.h"
/*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -147,7 +148,7 @@ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
u16 id;
/* This implementation only supports PCI-Express */
- if (dev->bus != &pci_bus_type)
+ if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
id = dcaid_from_pcidev(pdev);
@@ -179,7 +180,7 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
int i;
/* This implementation only supports PCI-Express */
- if (dev->bus != &pci_bus_type)
+ if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
@@ -320,7 +321,7 @@ static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
u16 global_req_table;
/* This implementation only supports PCI-Express */
- if (dev->bus != &pci_bus_type)
+ if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
id = dcaid_from_pcidev(pdev);
@@ -354,7 +355,7 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
u16 global_req_table;
/* This implementation only supports PCI-Express */
- if (dev->bus != &pci_bus_type)
+ if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
@@ -496,7 +497,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
u16 global_req_table;
/* This implementation only supports PCI-Express */
- if (dev->bus != &pci_bus_type)
+ if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
id = dcaid_from_pcidev(pdev);
@@ -530,7 +531,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca,
u16 global_req_table;
/* This implementation only supports PCI-Express */
- if (dev->bus != &pci_bus_type)
+ if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 4e3549a16132..940c1502a8b5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -947,7 +947,7 @@ msix:
for (i = 0; i < msixcnt; i++)
device->msix_entries[i].entry = i;
- err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+ err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
if (err)
goto msi;
@@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
- ioat_set_tcp_copy_break(4096);
err = ioat_register(device);
if (err)
return err;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index e982f00a9843..d63f68b1aa35 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
#define dump_desc_dbg(c, d) \
({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
-static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
-{
- #ifdef CONFIG_NET_DMA
- sysctl_tcp_dma_copybreak = copybreak;
- #endif
-}
-
static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8d1058085eeb..695483e6be32 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -735,7 +735,8 @@ int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
* called under bh_disabled so we need to trigger the timer
* event directly
*/
- if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+ if (time_is_before_jiffies(chan->timer.expires)
+ && timer_pending(&chan->timer)) {
struct ioatdma_device *device = chan->device;
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
@@ -899,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
- ioat_set_tcp_copy_break(2048);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b9b38a1cf92f..895f869d6c2c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -740,7 +740,7 @@ ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
@@ -1091,7 +1091,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
}
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
@@ -1133,7 +1133,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
flags);
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
@@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
- ioat_set_tcp_copy_break(262144);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
deleted file mode 100644
index bb48a57c2fc1..000000000000
--- a/drivers/dma/iovlock.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- * Portions based on net/core/datagram.c and copyrighted by their authors.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This code allows the net stack to make use of a DMA engine for
- * skb to iovec copies.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <net/tcp.h> /* for memcpy_toiovec */
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-static int num_pages_spanned(struct iovec *iov)
-{
- return
- ((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
- ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
-}
-
-/*
- * Pin down all the iovec pages needed for len bytes.
- * Return a struct dma_pinned_list to keep track of pages pinned down.
- *
- * We are allocating a single chunk of memory, and then carving it up into
- * 3 sections, the latter 2 whose size depends on the number of iovecs and the
- * total number of pages, respectively.
- */
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
-{
- struct dma_pinned_list *local_list;
- struct page **pages;
- int i;
- int ret;
- int nr_iovecs = 0;
- int iovec_len_used = 0;
- int iovec_pages_used = 0;
-
- /* don't pin down non-user-based iovecs */
- if (segment_eq(get_fs(), KERNEL_DS))
- return NULL;
-
- /* determine how many iovecs/pages there are, up front */
- do {
- iovec_len_used += iov[nr_iovecs].iov_len;
- iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
- nr_iovecs++;
- } while (iovec_len_used < len);
-
- /* single kmalloc for pinned list, page_list[], and the page arrays */
- local_list = kmalloc(sizeof(*local_list)
- + (nr_iovecs * sizeof (struct dma_page_list))
- + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
- if (!local_list)
- goto out;
-
- /* list of pages starts right after the page list array */
- pages = (struct page **) &local_list->page_list[nr_iovecs];
-
- local_list->nr_iovecs = 0;
-
- for (i = 0; i < nr_iovecs; i++) {
- struct dma_page_list *page_list = &local_list->page_list[i];
-
- len -= iov[i].iov_len;
-
- if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
- goto unpin;
-
- page_list->nr_pages = num_pages_spanned(&iov[i]);
- page_list->base_address = iov[i].iov_base;
-
- page_list->pages = pages;
- pages += page_list->nr_pages;
-
- /* pin pages down */
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(
- current,
- current->mm,
- (unsigned long) iov[i].iov_base,
- page_list->nr_pages,
- 1, /* write */
- 0, /* force */
- page_list->pages,
- NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret != page_list->nr_pages)
- goto unpin;
-
- local_list->nr_iovecs = i + 1;
- }
-
- return local_list;
-
-unpin:
- dma_unpin_iovec_pages(local_list);
-out:
- return NULL;
-}
-
-void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
-{
- int i, j;
-
- if (!pinned_list)
- return;
-
- for (i = 0; i < pinned_list->nr_iovecs; i++) {
- struct dma_page_list *page_list = &pinned_list->page_list[i];
- for (j = 0; j < page_list->nr_pages; j++) {
- set_page_dirty_lock(page_list->pages[j]);
- page_cache_release(page_list->pages[j]);
- }
- }
-
- kfree(pinned_list);
-}
-
-
-/*
- * We have already pinned down the pages we will be using in the iovecs.
- * Each entry in iov array has corresponding entry in pinned_list->page_list.
- * Using array indexing to keep iov[] and page_list[] in sync.
- * Initial elements in iov array's iov->iov_len will be 0 if already copied into
- * by another call.
- * iov array length remaining guaranteed to be bigger than len.
- */
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
-{
- int iov_byte_offset;
- int copy;
- dma_cookie_t dma_cookie = 0;
- int iovec_idx;
- int page_idx;
-
- if (!chan)
- return memcpy_toiovec(iov, kdata, len);
-
- iovec_idx = 0;
- while (iovec_idx < pinned_list->nr_iovecs) {
- struct dma_page_list *page_list;
-
- /* skip already used-up iovecs */
- while (!iov[iovec_idx].iov_len)
- iovec_idx++;
-
- page_list = &pinned_list->page_list[iovec_idx];
-
- iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
- page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
- /* break up copies to not cross page boundary */
- while (iov[iovec_idx].iov_len) {
- copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
- copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
- dma_cookie = dma_async_memcpy_buf_to_pg(chan,
- page_list->pages[page_idx],
- iov_byte_offset,
- kdata,
- copy);
- /* poll for a descriptor slot */
- if (unlikely(dma_cookie < 0)) {
- dma_async_issue_pending(chan);
- continue;
- }
-
- len -= copy;
- iov[iovec_idx].iov_len -= copy;
- iov[iovec_idx].iov_base += copy;
-
- if (!len)
- return dma_cookie;
-
- kdata += copy;
- iov_byte_offset = 0;
- page_idx++;
- }
- iovec_idx++;
- }
-
- /* really bad if we ever run out of iovecs */
- BUG();
- return -EFAULT;
-}
-
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, struct page *page,
- unsigned int offset, size_t len)
-{
- int iov_byte_offset;
- int copy;
- dma_cookie_t dma_cookie = 0;
- int iovec_idx;
- int page_idx;
- int err;
-
- /* this needs as-yet-unimplemented buf-to-buff, so punt. */
- /* TODO: use dma for this */
- if (!chan || !pinned_list) {
- u8 *vaddr = kmap(page);
- err = memcpy_toiovec(iov, vaddr + offset, len);
- kunmap(page);
- return err;
- }
-
- iovec_idx = 0;
- while (iovec_idx < pinned_list->nr_iovecs) {
- struct dma_page_list *page_list;
-
- /* skip already used-up iovecs */
- while (!iov[iovec_idx].iov_len)
- iovec_idx++;
-
- page_list = &pinned_list->page_list[iovec_idx];
-
- iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
- page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
- /* break up copies to not cross page boundary */
- while (iov[iovec_idx].iov_len) {
- copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
- copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
- dma_cookie = dma_async_memcpy_pg_to_pg(chan,
- page_list->pages[page_idx],
- iov_byte_offset,
- page,
- offset,
- copy);
- /* poll for a descriptor slot */
- if (unlikely(dma_cookie < 0)) {
- dma_async_issue_pending(chan);
- continue;
- }
-
- len -= copy;
- iov[iovec_idx].iov_len -= copy;
- iov[iovec_idx].iov_base += copy;
-
- if (!len)
- return dma_cookie;
-
- offset += copy;
- iov_byte_offset = 0;
- page_idx++;
- }
- iovec_idx++;
- }
-
- /* really bad if we ever run out of iovecs */
- BUG();
- return -EFAULT;
-}
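
The deleted iovlock helpers above leaned on a small piece of page arithmetic in num_pages_spanned(): how many pages a user buffer touches given its base address and length. A minimal standalone sketch of that calculation, assuming a 4096-byte page; it is an illustration, not kernel code.

/* Standalone sketch (not kernel code): the page-count arithmetic used by the
 * removed num_pages_spanned(), with PAGE_SIZE assumed to be 4096. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Number of pages touched by a buffer starting at 'base' of 'len' bytes. */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (PAGE_ALIGN(base + len) - (base & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	/* 32 bytes straddling a page boundary span two pages. */
	printf("%lu\n", pages_spanned(0x1ff0, 0x20));   /* prints 2 */
	/* A full page starting on a page boundary spans one page. */
	printf("%lu\n", pages_spanned(0x2000, 0x1000)); /* prints 1 */
	return 0;
}
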
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 394cbc5c93e3..7938272f2edf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
return 0;
}
-static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+/* This function must be called with the mv_xor_chan spinlock held */
+static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
dma_cookie_t cookie = 0;
@@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
mv_chan->dmachan.completed_cookie = cookie;
}
-static void
-mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
-{
- spin_lock_bh(&mv_chan->lock);
- __mv_xor_slot_cleanup(mv_chan);
- spin_unlock_bh(&mv_chan->lock);
-}
-
static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+
+ spin_lock_bh(&chan->lock);
mv_xor_slot_cleanup(chan);
+ spin_unlock_bh(&chan->lock);
}
static struct mv_xor_desc_slot *
@@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
struct mv_xor_desc_slot *iter, *_iter;
int in_use_descs = 0;
+ spin_lock_bh(&mv_chan->lock);
+
mv_xor_slot_cleanup(mv_chan);
- spin_lock_bh(&mv_chan->lock);
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
chain_node) {
in_use_descs++;
@@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE) {
- mv_xor_clean_completed_slots(mv_chan);
+ if (ret == DMA_COMPLETE)
return ret;
- }
+
+ spin_lock_bh(&mv_chan->lock);
mv_xor_slot_cleanup(mv_chan);
+ spin_unlock_bh(&mv_chan->lock);
return dma_cookie_status(chan, cookie, txstate);
}
@@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
- int i;
+ int i, ret;
void *src, *dest;
dma_addr_t src_dma, dest_dma;
struct dma_chan *dma_chan;
@@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
PAGE_SIZE, DMA_TO_DEVICE);
- unmap->to_cnt = 1;
unmap->addr[0] = src_dma;
+ ret = dma_mapping_error(dma_chan->device->dev, src_dma);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+ unmap->to_cnt = 1;
+
dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- unmap->from_cnt = 1;
unmap->addr[1] = dest_dma;
+ ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+ unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
PAGE_SIZE, 0);
+ if (!tx) {
+ dev_err(dma_chan->device->dev,
+ "Self-test cannot prepare operation, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
cookie = mv_xor_tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(dma_chan->device->dev,
+ "Self-test submit error, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(1);
@@ -866,7 +889,7 @@ out:
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
- int i, src_idx;
+ int i, src_idx, ret;
struct page *dest;
struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
@@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
dma_srcs[i] = unmap->addr[i];
+ ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
unmap->to_cnt++;
}
unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
dest_dma = unmap->addr[src_count];
+ ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
src_count, PAGE_SIZE, 0);
+ if (!tx) {
+ dev_err(dma_chan->device->dev,
+ "Self-test cannot prepare operation, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
cookie = mv_xor_tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(dma_chan->device->dev,
+ "Self-test submit error, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(8);
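
The mv_xor hunks above move the channel spinlock out of the cleanup routine and into its callers, documenting the lock requirement in a comment instead. A minimal userspace pthread analogue of that "caller holds the lock" refactor; the names are illustrative and not the driver API.

/* Standalone pthread sketch: cleanup() is documented as "call with the lock
 * held", and each caller takes the lock around it, mirroring the refactor. */
#include <pthread.h>
#include <stdio.h>

struct chan {
	pthread_mutex_t lock;
	int pending;
};

/* Must be called with chan->lock held. */
static void slot_cleanup(struct chan *c)
{
	c->pending = 0;
}

static void tasklet(struct chan *c)
{
	pthread_mutex_lock(&c->lock);
	slot_cleanup(c);
	pthread_mutex_unlock(&c->lock);
}

static void free_chan_resources(struct chan *c)
{
	pthread_mutex_lock(&c->lock);
	slot_cleanup(c);
	/* ... walk and free descriptor lists while still holding the lock ... */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chan c = { PTHREAD_MUTEX_INITIALIZER, 1 };

	tasklet(&c);
	free_chan_resources(&c);
	printf("pending = %d\n", c.pending);
	return 0;
}
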
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 4cf7d9a950d7..bbea8243f9e8 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1017,6 +1017,11 @@ static int omap_dma_resume(struct omap_chan *c)
return -EINVAL;
if (c->paused) {
+ mb();
+
+ /* Restore channel link register */
+ omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);
+
omap_dma_start(c, c->desc);
c->paused = false;
}
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f8bf00010d45..bbd65149cdb2 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -87,61 +87,73 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
}
/*
+ * Select DCT to which PCI cfg accesses are routed
+ */
+static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
+{
+ u32 reg = 0;
+
+ amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+ reg &= (pvt->model == 0x30) ? ~3 : ~1;
+ reg |= dct;
+ amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+}
+
+/*
*
* Depending on the family, F2 DCT reads need special handling:
*
- * K8: has a single DCT only
+ * K8: has a single DCT only and no address offsets >= 0x100
*
* F10h: each DCT has its own set of regs
* DCT0 -> F2x040..
* DCT1 -> F2x140..
*
- * F15h: we select which DCT we access using F1x10C[DctCfgSel]
- *
* F16h: has only 1 DCT
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
*/
-static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
- const char *func)
+static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
+ int offset, u32 *val)
{
- if (addr >= 0x100)
- return -EINVAL;
-
- return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
+ switch (pvt->fam) {
+ case 0xf:
+ if (dct || offset >= 0x100)
+ return -EINVAL;
+ break;
-static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
- const char *func)
-{
- return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
+ case 0x10:
+ if (dct) {
+ /*
+ * Note: If ganging is enabled, barring the regs
+ * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
+ * return 0. (cf. Section 2.8.1 F10h BKDG)
+ */
+ if (dct_ganging_enabled(pvt))
+ return 0;
-/*
- * Select DCT to which PCI cfg accesses are routed
- */
-static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
-{
- u32 reg = 0;
+ offset += 0x100;
+ }
+ break;
- amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
- reg &= (pvt->model >= 0x30) ? ~3 : ~1;
- reg |= dct;
- amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
-}
+ case 0x15:
+ /*
+ * F15h: F2x1xx addresses do not map explicitly to DCT1.
+ * We should select which DCT we access using F1x10C[DctCfgSel]
+ */
+ dct = (dct && pvt->model == 0x30) ? 3 : dct;
+ f15h_select_dct(pvt, dct);
+ break;
-static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
- const char *func)
-{
- u8 dct = 0;
+ case 0x16:
+ if (dct)
+ return -EINVAL;
+ break;
- /* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
- if (addr >= 0x140 && addr <= 0x1a0) {
- dct = (pvt->model >= 0x30) ? 3 : 1;
- addr -= 0x100;
+ default:
+ break;
}
-
- f15h_select_dct(pvt, dct);
-
- return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+ return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
@@ -768,16 +780,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
- if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
cs, *base0, reg0);
- if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+ if (pvt->fam == 0xf)
continue;
- if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, reg1);
+ cs, *base1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
for_each_chip_select_mask(cs, 0, pvt) {
@@ -786,16 +799,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
- if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
cs, *mask0, reg0);
- if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+ if (pvt->fam == 0xf)
continue;
- if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, reg1);
+ cs, *mask1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
}
@@ -1198,7 +1212,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
if (pvt->fam == 0xf)
return;
- if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+ if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
@@ -1219,7 +1233,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
dct_sel_interleave_addr(pvt));
}
- amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
+ amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
@@ -1430,7 +1444,7 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
return sys_addr;
}
- amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
+ amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
if (!(swap_reg & 0x1))
return sys_addr;
@@ -1723,10 +1737,16 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
WARN_ON(ctrl != 0);
}
- dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
- : pvt->csels[0].csbases;
-
+ if (pvt->fam == 0x10) {
+ dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
+ : pvt->dbam0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
+ pvt->csels[1].csbases :
+ pvt->csels[0].csbases;
+ } else if (ctrl) {
+ dbam = pvt->dbam0;
+ dcsb = pvt->csels[1].csbases;
+ }
edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
ctrl, dbam);
@@ -1760,7 +1780,6 @@ static struct amd64_family_type family_types[] = {
.early_channel_count = k8_early_channel_count,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
- .read_dct_pci_cfg = k8_read_dct_pci_cfg,
}
},
[F10_CPUS] = {
@@ -1771,7 +1790,6 @@ static struct amd64_family_type family_types[] = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
- .read_dct_pci_cfg = f10_read_dct_pci_cfg,
}
},
[F15_CPUS] = {
@@ -1782,7 +1800,6 @@ static struct amd64_family_type family_types[] = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f15_dbam_to_chip_select,
- .read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
[F15_M30H_CPUS] = {
@@ -1793,7 +1810,6 @@ static struct amd64_family_type family_types[] = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
- .read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
[F16_CPUS] = {
@@ -1804,7 +1820,6 @@ static struct amd64_family_type family_types[] = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
- .read_dct_pci_cfg = f10_read_dct_pci_cfg,
}
},
[F16_M30H_CPUS] = {
@@ -1815,7 +1830,6 @@ static struct amd64_family_type family_types[] = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
- .read_dct_pci_cfg = f10_read_dct_pci_cfg,
}
},
};
@@ -2148,25 +2162,25 @@ static void read_mc_regs(struct amd64_pvt *pvt)
read_dct_base_mask(pvt);
amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
- amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
+ amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
- amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
- amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
+ amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
+ amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
if (!dct_ganging_enabled(pvt)) {
- amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
- amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
+ amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
+ amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
}
pvt->ecc_sym_sz = 4;
if (pvt->fam >= 0x10) {
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ /* F16h has only DCT0, so no need to read dbam1 */
if (pvt->fam != 0x16)
- /* F16h has only DCT0 */
- amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+ amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
/* F10h, revD and later can do x8 ECC too */
if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
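
The amd64_edac changes above fold the per-family read_dct_pci_cfg callbacks into one dispatcher keyed on pvt->fam. A compact standalone model of that dispatch, with the same family cases simplified for illustration; the PCI access is stubbed out and the names are not the driver's API.

/* Standalone sketch of the per-family DCT config-space dispatch added above.
 * Offsets and behaviour follow the cases in amd64_read_dct_pci_cfg(),
 * heavily simplified. */
#include <stdio.h>

struct pvt { unsigned fam; unsigned model; int ganged; };

static int read_pci_cfg(int offset, unsigned *val)
{
	printf("read F2x%03x\n", offset);
	*val = 0;
	return 0;
}

static int read_dct_pci_cfg(struct pvt *pvt, int dct, int offset, unsigned *val)
{
	switch (pvt->fam) {
	case 0x0f:			/* K8: single DCT, no regs >= 0x100 */
		if (dct || offset >= 0x100)
			return -1;
		break;
	case 0x10:			/* F10h: DCT1 regs live at +0x100 */
		if (dct) {
			if (pvt->ganged)
				return 0;
			offset += 0x100;
		}
		break;
	case 0x15:			/* F15h: route via DctCfgSel instead */
		printf("select DCT %d via F1x10C\n",
		       (dct && pvt->model == 0x30) ? 3 : dct);
		break;
	case 0x16:			/* F16h: DCT0 only */
		if (dct)
			return -1;
		break;
	}
	return read_pci_cfg(offset, val);
}

int main(void)
{
	struct pvt f10 = { 0x10, 0, 0 }, f15 = { 0x15, 0x30, 0 };
	unsigned v;

	read_dct_pci_cfg(&f10, 1, 0x40, &v);	/* reads F2x140 */
	read_dct_pci_cfg(&f15, 1, 0x40, &v);	/* selects DCT 3, reads F2x040 */
	return 0;
}
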
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index d903e0c21144..55fb5941c6d4 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -481,8 +481,6 @@ struct low_ops {
void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *);
int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
- int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
- u32 *val, const char *func);
};
struct amd64_family_type {
@@ -502,9 +500,6 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
#define amd64_write_pci_cfg(pdev, offset, val) \
__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
-#define amd64_read_dct_pci_cfg(pvt, offset, val) \
- pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
-
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3c2625e7980d..6c9f381e8fe6 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -66,7 +66,7 @@
#define EDAC_PCI "PCI"
#define EDAC_DEBUG "DEBUG"
-extern const char *edac_mem_types[];
+extern const char * const edac_mem_types[];
#ifdef CONFIG_EDAC_DEBUG
extern int edac_debug_level;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9f134823fa75..c3893b0ddb18 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -128,7 +128,7 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
/*
* keep those in sync with the enum mem_type
*/
-const char *edac_mem_types[] = {
+const char * const edac_mem_types[] = {
"Empty csrow",
"Reserved csrow type",
"Unknown csrow type",
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index f4aec2e6ef56..7d3742edbaa2 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -633,7 +633,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_l2_isr, 0,
+ mpc85xx_l2_isr, IRQF_SHARED,
"[EDAC] L2 err", edac_dev);
if (res < 0) {
printk(KERN_ERR
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index ef6b7e08f485..0f04d5ead521 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -974,7 +974,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
* page size (PAGE_SIZE) or the memory width (2 or 4).
*/
for (j = 0; j < csi->nr_channels; j++) {
- struct dimm_info *dimm = csi->channels[j].dimm;
+ struct dimm_info *dimm = csi->channels[j]->dimm;
dimm->nr_pages = nr_pages / csi->nr_channels;
dimm->grain = 1;
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index d8be608a9f3b..aef6a95adef5 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
-obj-$(CONFIG_EFI_STUB) += libstub/
+obj-$(CONFIG_EFI_ARM_STUB) += libstub/
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d62eaaa75397..687476fb39e3 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -377,8 +377,10 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct gpio_chip *chip = achip->chip;
struct acpi_resource_gpio *agpio;
struct acpi_resource *ares;
+ int pin_index = (int)address;
acpi_status status;
bool pull_up;
+ int length;
int i;
status = acpi_buffer_to_resource(achip->conn_info.connection,
@@ -400,7 +402,8 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
return AE_BAD_PARAMETER;
}
- for (i = 0; i < agpio->pin_table_length; i++) {
+ length = min(agpio->pin_table_length, (u16)(pin_index + bits));
+ for (i = pin_index; i < length; ++i) {
unsigned pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 15cc0bb65dda..c68d037de656 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -413,12 +413,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
return;
}
- irq_set_chained_handler(parent_irq, parent_handler);
/*
* The parent irqchip is already using the chip_data for this
* irqchip, so our callbacks simply use the handler_data.
*/
irq_set_handler_data(parent_irq, gpiochip);
+ irq_set_chained_handler(parent_irq, parent_handler);
}
EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
@@ -1674,7 +1674,7 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
/* No particular flag request, return here... */
- if (flags & GPIOD_FLAGS_BIT_DIR_SET)
+ if (!(flags & GPIOD_FLAGS_BIT_DIR_SET))
return desc;
/* Process flags */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index dea99d92fb4a..4b7ed5289217 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
- ret = init_hash_table(ring, cmd_tables, cmd_table_count);
- if (ret) {
- DRM_ERROR("CMD: cmd_parser_init failed!\n");
- fini_hash_table(ring);
- return ret;
+ if (hash_empty(ring->cmd_hash)) {
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
+ }
}
ring->needs_cmd_parser = true;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1411613f2174..e42925f76b4b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1310,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+ intel_gtt_chipset_flush();
+ } else {
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ }
+}
+
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1326,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.start,
dev_priv->gtt.base.total,
true);
+
+ i915_ggtt_flush(dev_priv);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -1378,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
}
- i915_gem_chipset_flush(dev);
+ i915_ggtt_flush(dev_priv);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7f84dd263ee8..9842fd2e742a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -66,12 +66,12 @@ module_param_named(powersave, i915.powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-module_param_named(semaphores, i915.semaphores, int, 0400);
+module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
-module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
+module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
@@ -79,7 +79,7 @@ MODULE_PARM_DESC(enable_rc6,
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
-module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
+module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
+module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index eee79e1c3222..afcc8dd40bdd 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -946,7 +946,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
port_name(port));
if (is_dvi && (port == PORT_A || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
+ DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
if (!is_dvi && !is_dp && !is_crt)
DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
port_name(port));
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ca34de7f6a7b..5a9de21637b7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -732,7 +732,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (tmp & HDMI_MODE_SELECT_HDMI)
+ if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
if (!HAS_PCH_SPLIT(dev) &&
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ca52ad2ae7d1..d8de1d5140a7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
return -EINVAL;
}
+/*
+ * If the vendor backlight interface is not in use and ACPI backlight interface
+ * is broken, do not bother processing backlight change requests from firmware.
+ */
+static bool should_ignore_backlight_request(void)
+{
+ return acpi_video_backlight_support() &&
+ !acpi_video_verify_backlight_support();
+}
+
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
- /*
- * If the acpi_video interface is not supposed to be used, don't
- * bother processing backlight level change requests from firmware.
- */
- if (!acpi_video_verify_backlight_support()) {
+ if (should_ignore_backlight_request()) {
DRM_DEBUG_KMS("opregion backlight request ignored\n");
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 4b5bb5d58a54..f8cbb512132f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1763,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp
const int or = ffs(outp->or) - 1;
const u32 loff = (or * 0x800) + (link * 0x80);
const u16 mask = (outp->sorconf.link << 6) | outp->or;
+ struct dcb_output match;
u8 ver, hdr;
- if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+ if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 99cd9e4a2aa6..3440fc999f2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_software_chan *swch;
struct nv_dma_v0 args = {};
int ret, i;
+ bool save;
nvif_object_map(chan->object);
@@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise synchronisation */
- return nouveau_fence(chan->drm)->context_new(chan);
+ save = cli->base.super;
+ cli->base.super = true; /* hack until fencenv50 fixed */
+ ret = nouveau_fence(chan->drm)->context_new(chan);
+ cli->base.super = save;
+ return ret;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 65b4fd53dd4e..4a21b2b06ce2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev)
}
int
-nouveau_display_suspend(struct drm_device *dev)
+nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
nouveau_display_fini(dev);
- NV_INFO(drm, "unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -579,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev)
}
void
-nouveau_display_repin(struct drm_device *dev)
+nouveau_display_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
- int ret;
+ int ret, head;
+ /* re-pin fb/cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -606,13 +605,6 @@ nouveau_display_repin(struct drm_device *dev)
if (ret)
NV_ERROR(drm, "Could not pin/map cursor.\n");
}
-}
-
-void
-nouveau_display_resume(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- int head;
nouveau_display_init(dev);
@@ -627,6 +619,13 @@ nouveau_display_resume(struct drm_device *dev)
for (head = 0; head < dev->mode_config.num_crtc; head++)
drm_vblank_on(dev, head);
+ /* This should ensure we don't hit a locking problem when someone
+ * wakes us up via a connector. We should never go into suspend
+ * while the display is on anyways.
+ */
+ if (runtime)
+ return;
+
drm_helper_resume_force_mode(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 88ca177cb1c7..be3d5947c6be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -63,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
void nouveau_display_fini(struct drm_device *dev);
-int nouveau_display_suspend(struct drm_device *dev);
-void nouveau_display_repin(struct drm_device *dev);
-void nouveau_display_resume(struct drm_device *dev);
+int nouveau_display_suspend(struct drm_device *dev, bool runtime);
+void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, int);
void nouveau_display_vblank_disable(struct drm_device *, int);
int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 9c3af96a7153..3ed32dd90303 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -547,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_cli *cli;
int ret;
- if (dev->mode_config.num_crtc && !runtime) {
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "suspending console...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
NV_INFO(drm, "suspending display...\n");
- ret = nouveau_display_suspend(dev);
+ ret = nouveau_display_suspend(dev, runtime);
if (ret)
return ret;
}
@@ -603,7 +605,7 @@ fail_client:
fail_display:
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ nouveau_display_resume(dev, runtime);
}
return ret;
}
@@ -618,9 +620,6 @@ int nouveau_pmops_suspend(struct device *dev)
drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 1);
-
ret = nouveau_do_suspend(drm_dev, false);
if (ret)
return ret;
@@ -633,7 +632,7 @@ int nouveau_pmops_suspend(struct device *dev)
}
static int
-nouveau_do_resume(struct drm_device *dev)
+nouveau_do_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
@@ -658,7 +657,9 @@ nouveau_do_resume(struct drm_device *dev)
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "resuming display...\n");
- nouveau_display_repin(dev);
+ nouveau_display_resume(dev, runtime);
+ NV_INFO(drm, "resuming console...\n");
+ nouveau_fbcon_set_suspend(dev, 0);
}
return 0;
@@ -681,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev);
- if (ret)
- return ret;
-
- if (drm_dev->mode_config.num_crtc) {
- nouveau_display_resume(drm_dev);
- nouveau_fbcon_set_suspend(drm_dev, 0);
- }
-
- return 0;
+ return nouveau_do_resume(drm_dev, false);
}
static int nouveau_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 1);
-
- ret = nouveau_do_suspend(drm_dev, false);
- return ret;
+ return nouveau_do_suspend(drm_dev, false);
}
static int nouveau_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- ret = nouveau_do_resume(drm_dev);
- if (ret)
- return ret;
-
- if (drm_dev->mode_config.num_crtc) {
- nouveau_display_resume(drm_dev);
- nouveau_fbcon_set_suspend(drm_dev, 0);
- }
-
- return 0;
+ return nouveau_do_resume(drm_dev, false);
}
@@ -977,7 +952,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev);
+ ret = nouveau_do_resume(drm_dev, true);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8bdd27091db8..49fe6075cc7c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -486,6 +486,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.fb_probe = nouveau_fbcon_create,
};
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
+ console_lock();
+ nouveau_fbcon_accel_restore(fbcon->dev);
+ nouveau_fbcon_zfill(fbcon->dev, fbcon);
+ fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
+ console_unlock();
+}
int
nouveau_fbcon_init(struct drm_device *dev)
@@ -503,6 +513,7 @@ nouveau_fbcon_init(struct drm_device *dev)
if (!fbcon)
return -ENOMEM;
+ INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
fbcon->dev = dev;
drm->fbcon = fbcon;
@@ -551,14 +562,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->fbcon) {
- console_lock();
- if (state == 0) {
- nouveau_fbcon_accel_restore(dev);
- nouveau_fbcon_zfill(dev, drm->fbcon);
+ if (state == FBINFO_STATE_RUNNING) {
+ schedule_work(&drm->fbcon->work);
+ return;
}
+ flush_work(&drm->fbcon->work);
+ console_lock();
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 1)
- nouveau_fbcon_accel_save_disable(dev);
+ nouveau_fbcon_accel_save_disable(dev);
console_unlock();
}
}
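
The fbcon change above defers the resume half of nouveau_fbcon_set_suspend() to a work item and flushes it before suspending again, so the caller never sits on the console lock from a resume path. A userspace pthread analogue of that schedule/flush pattern; it ignores re-scheduling an already pending worker and the names are illustrative, not the kernel API.

/* Standalone pthread sketch of the deferred-resume pattern added above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t worker;
static int worker_live;

static void *resume_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&console_lock);
	printf("restore acceleration, unsuspend framebuffer\n");
	pthread_mutex_unlock(&console_lock);
	return NULL;
}

static void set_suspend(int state)
{
	if (state == 0) {		/* resume: defer to the worker */
		pthread_create(&worker, NULL, resume_work, NULL);
		worker_live = 1;
		return;
	}
	if (worker_live) {		/* suspend: flush any pending resume */
		pthread_join(worker, NULL);
		worker_live = 0;
	}
	pthread_mutex_lock(&console_lock);
	printf("suspend framebuffer, save/disable acceleration\n");
	pthread_mutex_unlock(&console_lock);
}

int main(void)
{
	set_suspend(1);
	set_suspend(0);
	set_suspend(1);
	return 0;
}
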
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 34658cfa8f5d..0b465c7d3907 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -36,6 +36,7 @@ struct nouveau_fbdev {
struct nouveau_framebuffer nouveau_fb;
struct list_head fbdev_list;
struct drm_device *dev;
+ struct work_struct work;
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index fa9565957f9d..3d546c606b43 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4803,7 +4803,7 @@ struct bonaire_mqd
*/
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
- int r, i, idx;
+ int r, i, j, idx;
u32 tmp;
bool use_doorbell = true;
u64 hqd_gpu_addr;
@@ -4922,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
mqd->queue_state.cp_hqd_pq_wptr= 0;
if (RREG32(CP_HQD_ACTIVE) & 1) {
WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
- for (i = 0; i < rdev->usec_timeout; i++) {
+ for (j = 0; j < rdev->usec_timeout; j++) {
if (!(RREG32(CP_HQD_ACTIVE) & 1))
break;
udelay(1);
@@ -7751,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -8251,6 +8251,7 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
@@ -8259,7 +8260,6 @@ restart_ih:
if (queue_thermal)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
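
The radeon IH hunks above (repeated below for evergreen, r600 and si) clear RB_OVERFLOW from wptr before it is used and wrap the recovery read pointer with the ring's ptr_mask instead of adding it. A standalone sketch of that pointer arithmetic; the ring size and flag bit here are illustrative constants, not the hardware values.

/* Standalone sketch of the IH ring read-pointer recovery fixed above:
 * drop the overflow flag first, then wrap with a mask (& ptr_mask),
 * not an addition. */
#include <stdio.h>

#define RING_BYTES   0x1000u
#define PTR_MASK     (RING_BYTES - 1)		/* 0x0fff */
#define RB_OVERFLOW  0x80000000u		/* flag reported in wptr */

static unsigned recover_rptr(unsigned wptr)
{
	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;		/* drop the flag first */
		/* resume 16 bytes past the last overwritten vector */
		return (wptr + 16) & PTR_MASK;
	}
	return wptr & PTR_MASK;
}

int main(void)
{
	/* an overflow near the end of the ring wraps back to the start */
	printf("0x%03x\n", recover_rptr(RB_OVERFLOW | 0x0ff0));	/* 0x000 */
	printf("0x%03x\n", recover_rptr(0x0120));			/* 0x120 */
	return 0;
}
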
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index dbca60c7d097..e50807c29f69 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -5137,6 +5137,7 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
@@ -5145,7 +5146,6 @@ restart_ih:
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 3cfb50056f7a..ea5c9af722ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3792,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -4048,6 +4048,7 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
@@ -4056,7 +4057,6 @@ restart_ih:
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5f05b4c84338..3247bfd14410 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -106,6 +106,7 @@ extern int radeon_vm_block_size;
extern int radeon_deep_color;
extern int radeon_use_pflipirq;
extern int radeon_bapm;
+extern int radeon_backlight;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 75223dd3a8a3..12c8329644c4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -123,6 +123,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+ */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4126fd0937a2..f9d17b29b343 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -181,6 +181,7 @@ int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
+int radeon_backlight = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -263,6 +264,9 @@ module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, radeon_bapm, int, 0444);
+MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(backlight, radeon_backlight, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c2094c25b53..15edf23b465c 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
return ret;
}
+static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ bool use_bl = false;
+
+ if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
+ return;
+
+ if (radeon_backlight == 0) {
+ return;
+ } else if (radeon_backlight == 1) {
+ use_bl = true;
+ } else if (radeon_backlight == -1) {
+ /* Quirks */
+ /* Amilo Xi 2550 only works with acpi bl */
+ if ((rdev->pdev->device == 0x9583) &&
+ (rdev->pdev->subsystem_vendor == 0x1734) &&
+ (rdev->pdev->subsystem_device == 0x1107))
+ use_bl = false;
+ else
+ use_bl = true;
+ }
+
+ if (use_bl) {
+ if (rdev->is_atom_bios)
+ radeon_atom_backlight_init(radeon_encoder, connector);
+ else
+ radeon_legacy_backlight_init(radeon_encoder, connector);
+ rdev->mode_info.bl_encoder = radeon_encoder;
+ }
+}
+
void
radeon_link_encoder_connector(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
@@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev)
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices) {
drm_mode_connector_attach_encoder(connector, encoder);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (rdev->is_atom_bios)
- radeon_atom_backlight_init(radeon_encoder, connector);
- else
- radeon_legacy_backlight_init(radeon_encoder, connector);
- rdev->mode_info.bl_encoder = radeon_encoder;
- }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_encoder_add_backlight(radeon_encoder, connector);
}
}
}
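
radeon_encoder_add_backlight() above gates native backlight registration behind the new radeon.backlight module parameter, with -1 falling back to a per-device quirk check. A tiny standalone sketch of that three-way parameter plus quirk-list decision; the single quirk entry is the Amilo Xi 2550 case visible in the hunk, and the helper names are illustrative.

/* Standalone sketch of the backlight parameter/quirk decision introduced
 * above: 0 = never, 1 = always, -1 = auto unless the device is quirked. */
#include <stdio.h>

struct pci_id { unsigned short device, sub_vendor, sub_device; };

static const struct pci_id native_bl_denylist[] = {
	{ 0x9583, 0x1734, 0x1107 },	/* Amilo Xi 2550: ACPI backlight only */
};

static int use_native_backlight(int param, struct pci_id id)
{
	unsigned i;

	if (param == 0)
		return 0;
	if (param == 1)
		return 1;
	/* param == -1: auto, unless the device is on the quirk list */
	for (i = 0; i < sizeof(native_bl_denylist) / sizeof(native_bl_denylist[0]); i++)
		if (native_bl_denylist[i].device == id.device &&
		    native_bl_denylist[i].sub_vendor == id.sub_vendor &&
		    native_bl_denylist[i].sub_device == id.sub_device)
			return 0;
	return 1;
}

int main(void)
{
	struct pci_id amilo = { 0x9583, 0x1734, 0x1107 };
	struct pci_id other = { 0x6840, 0x1043, 0x2122 };

	printf("%d %d %d\n",
	       use_native_backlight(-1, amilo),	/* 0 */
	       use_native_backlight(-1, other),	/* 1 */
	       use_native_backlight(0, other));	/* 0 */
	return 0;
}
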
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6bce40847753..3a0b973e8a96 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6316,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6664,13 +6664,13 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index c18d5d71062d..f42df4dd58d2 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -530,6 +530,17 @@ config PANTHERLORD_FF
Say Y here if you have a PantherLord/GreenAsia based game controller
or adapter and want to enable force feedback support for it.
+config HID_PENMOUNT
+ tristate "Penmount touch device"
+ depends on USB_HID
+ ---help---
+ This selects a driver for the PenMount 6000 touch controller.
+
+ The driver works around a problem in the report descriptor, allowing
+ userspace to receive touch events instead of mouse events.
+
+ Say Y here if you have a Penmount based touch controller.
+
config HID_PETALYNX
tristate "Petalynx Maxter remote control"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 4dbac7f8530c..e2850d8af9ca 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
+obj-$(CONFIG_HID_PENMOUNT) += hid-penmount.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
hid-picolcd-y += hid-picolcd_core.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 12b6e67d9de0..73bd9e2e42bc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL_GPL(hid_debug);
static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
-MODULE_PARM_DESC(debug, "Ignore any special drivers and handle all devices by generic driver");
+MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
/*
* Register a new report for a device.
@@ -1591,6 +1591,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
hdev->claimed |= HID_CLAIMED_HIDRAW;
+ if (connect_mask & HID_CONNECT_DRIVER)
+ hdev->claimed |= HID_CLAIMED_DRIVER;
+
/* Drivers with the ->raw_event callback set are not required to connect
* to any other listener. */
if (!hdev->claimed && !hdev->driver->raw_event) {
@@ -1793,6 +1796,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
@@ -1880,6 +1884,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
#if IS_ENABLED(CONFIG_HID_ROCCAT)
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index d60fbd0adc0c..78b3a0c76775 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -29,6 +29,7 @@
* and Zalman ZM-GM1
* - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
* - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse
+ * - USB ID 04d9:a0c2, sold as ETEKCITY Scroll T-140 Gaming Mouse
*/
static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -42,6 +43,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
switch (hdev->product) {
case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067:
case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072:
+ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2:
if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f
&& rdesc[120] == 0xff && rdesc[121] == 0x7f) {
hid_info(hdev, "Fixing up report descriptor\n");
@@ -74,6 +76,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
{ }
};
MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 25cd674d6064..cd9c9e96cf0e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -296,6 +296,9 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
+#define USB_VENDOR_ID_ELAN 0x04f3
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
+
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -479,6 +482,7 @@
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070 0xa070
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2
#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096
#define USB_VENDOR_ID_IMATION 0x0718
@@ -722,6 +726,7 @@
#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500
#define USB_DEVICE_ID_PENMOUNT_1610 0x1610
#define USB_DEVICE_ID_PENMOUNT_1640 0x1640
+#define USB_DEVICE_ID_PENMOUNT_6000 0x6000
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
@@ -733,6 +738,8 @@
#define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
#define USB_VENDOR_ID_PIXART 0x093a
+#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2 0x0137
+#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE 0x2510
#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 2619f7f4517a..2df7fddbd119 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -599,6 +599,12 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
/* These usage IDs map directly to the usage codes. */
case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+ if (field->flags & HID_MAIN_ITEM_RELATIVE)
+ map_rel(usage->hid & 0xf);
+ else
+ map_abs_clear(usage->hid & 0xf);
+ break;
+
case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL:
if (field->flags & HID_MAIN_ITEM_RELATIVE)
map_rel(usage->hid & 0xf);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 9bf8637747a5..71f569292cab 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -385,18 +385,6 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
djdev = djrcv_dev->paired_dj_devices[dj_report->device_index];
- if (!djdev) {
- dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
- " is NULL, index %d\n", dj_report->device_index);
- kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
-
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was already "
- "queued\n", __func__);
- }
- return;
- }
-
memset(reportbuffer, 0, sizeof(reportbuffer));
for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) {
@@ -421,18 +409,6 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index];
- if (dj_device == NULL) {
- dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
- " is NULL, index %d\n", dj_report->device_index);
- kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
-
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was already "
- "queued\n", __func__);
- }
- return;
- }
-
if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) ||
(hid_reportid_size_map[dj_report->report_type] == 0)) {
dbg_hid("invalid report type:%x\n", dj_report->report_type);
@@ -701,8 +677,17 @@ static int logi_dj_raw_event(struct hid_device *hdev,
}
spin_lock_irqsave(&djrcv_dev->lock, flags);
+
+ if (!djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ /* received an event for an unknown device, bail out */
+ logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+ goto out;
+ }
+
switch (dj_report->report_type) {
case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ /* pairing notifications are handled above the switch */
+ break;
case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
logi_dj_recv_queue_notification(djrcv_dev, dj_report);
break;
@@ -715,6 +700,8 @@ static int logi_dj_raw_event(struct hid_device *hdev,
default:
logi_dj_recv_forward_report(djrcv_dev, dj_report);
}
+
+out:
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return true;
diff --git a/drivers/hid/hid-penmount.c b/drivers/hid/hid-penmount.c
new file mode 100644
index 000000000000..c11dce85cd18
--- /dev/null
+++ b/drivers/hid/hid-penmount.c
@@ -0,0 +1,49 @@
+/*
+ * HID driver for PenMount touchscreens
+ *
+ * Copyright (c) 2014 Christian Gmeiner <christian.gmeiner <at> gmail.com>
+ *
+ * based on hid-penmount copyrighted by
+ * PenMount Touch Solutions <penmount <at> seed.net.tw>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/hid.h>
+#include "hid-ids.h"
+
+static int penmount_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id penmount_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, penmount_devices);
+
+static struct hid_driver penmount_driver = {
+ .name = "hid-penmount",
+ .id_table = penmount_devices,
+ .input_mapping = penmount_input_mapping,
+};
+
+module_hid_driver(penmount_driver);
+
+MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
+MODULE_DESCRIPTION("PenMount HID TouchScreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 020df3c2e8b4..c1b29a9eb41a 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -351,8 +351,8 @@ static int picolcd_raw_event(struct hid_device *hdev,
return 1;
if (size > 64) {
- hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
- size);
+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event (%d)\n",
+ size, report->id);
return 0;
}
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 8389e8109218..3cccff73b9b9 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -320,10 +320,7 @@ static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
int offset;
int i;
- if (size < hdata->f11.report_size)
- return 0;
-
- if (!(irq & hdata->f11.irq_mask))
+ if (!(irq & hdata->f11.irq_mask) || size <= 0)
return 0;
offset = (hdata->max_fingers >> 2) + 1;
@@ -332,9 +329,19 @@ static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
int fs_bit_position = (i & 0x3) << 1;
int finger_state = (data[fs_byte_position] >> fs_bit_position) &
0x03;
+ int position = offset + 5 * i;
+
+ if (position + 5 > size) {
+ /* partial report, go on with what we received */
+ printk_once(KERN_WARNING
+ "%s %s: Detected incomplete finger report. Finger reports may occasionally get dropped on this platform.\n",
+ dev_driver_string(&hdev->dev),
+ dev_name(&hdev->dev));
+ hid_dbg(hdev, "Incomplete finger report\n");
+ break;
+ }
- rmi_f11_process_touch(hdata, i, finger_state,
- &data[offset + 5 * i]);
+ rmi_f11_process_touch(hdata, i, finger_state, &data[position]);
}
input_mt_sync_frame(hdata->input);
input_sync(hdata->input);
@@ -352,6 +359,11 @@ static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data,
if (!(irq & hdata->f30.irq_mask))
return 0;
+ if (size < (int)hdata->f30.report_size) {
+ hid_warn(hdev, "Click Button pressed, but the click data is missing\n");
+ return 0;
+ }
+
for (i = 0; i < hdata->gpio_led_count; i++) {
if (test_bit(i, &hdata->button_mask)) {
value = (data[i / 8] >> (i & 0x07)) & BIT(0);
@@ -412,9 +424,29 @@ static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size)
return 1;
}
+static int rmi_check_sanity(struct hid_device *hdev, u8 *data, int size)
+{
+ int valid_size = size;
+ /*
+ * On the Dell XPS 13 9333, the bus sometimes gets confused and fills
+ * the report with a sentinel value "ff". Synaptics told us that such
+ * behavior does not come from the touchpad itself, so we filter out
+ * such reports here.
+ */
+
+ while (valid_size > 0 && data[valid_size - 1] == 0xff)
+ valid_size--;
+
+ return valid_size;
+}
+
static int rmi_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
+ size = rmi_check_sanity(hdev, data, size);
+ if (size < 2)
+ return 0;
+
switch (data[0]) {
case RMI_READ_DATA_REPORT_ID:
return rmi_read_data_event(hdev, data, size);
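
The trailing-0xff trim performed by rmi_check_sanity() above can be restated as a small self-contained helper, handy for exercising the logic outside the driver. This is a minimal sketch in plain C; the function name and the sample report bytes are illustrative, not part of the driver:

#include <stdint.h>

/* Trim trailing 0xff sentinel bytes; the bound check runs before the
 * array access so an all-0xff buffer safely trims down to zero. */
static int trim_trailing_ff(const uint8_t *data, int size)
{
	while (size > 0 && data[size - 1] == 0xff)
		size--;
	return size;
}

/* Example: { 0x41, 0x00, 0x01, 0xff, 0xff, 0xff } trims from 6 to 3
 * bytes, which rmi_raw_event() then accepts since the result is >= 2. */
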
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 2ac25760a9a9..e6d8e18dae97 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -709,6 +709,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
USB_DEVICE_ID_MS_TYPE_COVER_2),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
+ USB_DEVICE_ID_STM_HID_SENSOR),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR_1),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index c372368e438c..bc4269e559f1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1,5 +1,5 @@
/*
- * HID driver for Sony / PS2 / PS3 BD devices.
+ * HID driver for Sony / PS2 / PS3 / PS4 BD devices.
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
@@ -8,6 +8,7 @@
* Copyright (c) 2012 David Dillow <dave@thedillows.org>
* Copyright (c) 2006-2013 Jiri Kosina
* Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com>
+ * Copyright (c) 2014 Frank Praznik <frank.praznik@gmail.com>
*/
/*
@@ -176,7 +177,7 @@ static u8 dualshock4_usb_rdesc[] = {
0x75, 0x06, /* Report Size (6), */
0x95, 0x01, /* Report Count (1), */
0x15, 0x00, /* Logical Minimum (0), */
- 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x25, 0x3F, /* Logical Maximum (63), */
0x81, 0x02, /* Input (Variable), */
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x33, /* Usage (Rx), */
@@ -200,14 +201,14 @@ static u8 dualshock4_usb_rdesc[] = {
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0xFF, 0xBF, /* Logical Minimum (-16385), */
- 0x26, 0x00, 0x40, /* Logical Maximum (16384), */
+ 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
+ 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
0x09, 0x21, /* Usage (21h), */
0x15, 0x00, /* Logical Minimum (0), */
- 0x25, 0xFF, /* Logical Maximum (255), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
0x75, 0x08, /* Report Size (8), */
0x95, 0x27, /* Report Count (39), */
0x81, 0x02, /* Input (Variable), */
@@ -395,11 +396,11 @@ static u8 dualshock4_usb_rdesc[] = {
/*
* The default behavior of the Dualshock 4 is to send reports using report
- * type 1 when running over Bluetooth. However, as soon as it receives a
- * report of type 17 to set the LEDs or rumble it starts returning it's state
- * in report 17 instead of 1. Since report 17 is undefined in the default HID
+ * type 1 when running over Bluetooth. However, when feature report 2 is
+ * requested during the controller initialization it starts sending input
+ * reports in report 17. Since report 17 is undefined in the default HID
* descriptor the button and axis definitions must be moved to report 17 or
- * the HID layer won't process the received input once a report is sent.
+ * the HID layer won't process the received input.
*/
static u8 dualshock4_bt_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
@@ -509,8 +510,8 @@ static u8 dualshock4_bt_rdesc[] = {
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0xFF, 0xBF, /* Logical Minimum (-16385), */
- 0x26, 0x00, 0x40, /* Logical Maximum (16384), */
+ 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
+ 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -935,12 +936,13 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
if (rd[30] >= 0xee) {
battery_capacity = 100;
battery_charging = !(rd[30] & 0x01);
+ cable_state = 1;
} else {
__u8 index = rd[30] <= 5 ? rd[30] : 5;
battery_capacity = sixaxis_battery_capacity[index];
battery_charging = 0;
+ cable_state = 0;
}
- cable_state = !(rd[31] & 0x04);
spin_lock_irqsave(&sc->lock, flags);
sc->cable_state = cable_state;
@@ -1082,6 +1084,38 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
+static int sony_register_touchpad(struct hid_input *hi, int touch_count,
+ int w, int h)
+{
+ struct input_dev *input_dev = hi->input;
+ int ret;
+
+ ret = input_mt_init_slots(input_dev, touch_count, 0);
+ if (ret < 0)
+ return ret;
+
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+ return 0;
+}
+
+static void sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ /*
+ * The Dualshock 4 touchpad supports 2 touches and has a
+ * resolution of 1920x942 (44.86 dots/mm).
+ */
+ if (sc->quirks & DUALSHOCK4_CONTROLLER) {
+ if (sony_register_touchpad(hidinput, 2, 1920, 942) != 0)
+ hid_err(sc->hdev,
+ "Unable to initialize multi-touch slots\n");
+ }
+}
+
/*
* Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
* to "operational". Without this, the ps3 controller will not report any
@@ -1654,26 +1688,6 @@ static void sony_battery_remove(struct sony_sc *sc)
sc->battery.name = NULL;
}
-static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
- int w, int h)
-{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
- int ret;
-
- ret = input_mt_init_slots(input_dev, touch_count, 0);
- if (ret < 0) {
- hid_err(sc->hdev, "Unable to initialize multi-touch slots\n");
- return ret;
- }
-
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
-
- return 0;
-}
-
/*
* If a controller is plugged in via USB while already connected via Bluetooth
* it will show up as two devices. A global list of connected controllers and
@@ -1923,13 +1937,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_stop;
}
}
- /*
- * The Dualshock 4 touchpad supports 2 touches and has a
- * resolution of 1920x940.
- */
- ret = sony_register_touchpad(sc, 2, 1920, 940);
- if (ret < 0)
- goto err_stop;
sony_init_work(sc, dualshock4_state_worker);
} else {
@@ -2037,13 +2044,14 @@ static const struct hid_device_id sony_devices[] = {
MODULE_DEVICE_TABLE(hid, sony_devices);
static struct hid_driver sony_driver = {
- .name = "sony",
- .id_table = sony_devices,
- .input_mapping = sony_mapping,
- .probe = sony_probe,
- .remove = sony_remove,
- .report_fixup = sony_report_fixup,
- .raw_event = sony_raw_event
+ .name = "sony",
+ .id_table = sony_devices,
+ .input_mapping = sony_mapping,
+ .input_configured = sony_input_configured,
+ .probe = sony_probe,
+ .remove = sony_remove,
+ .report_fixup = sony_report_fixup,
+ .raw_event = sony_raw_event
};
static int __init sony_init(void)
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
index 134be89b15ea..b95d3978c272 100644
--- a/drivers/hid/hid-thingm.c
+++ b/drivers/hid/hid-thingm.c
@@ -208,10 +208,10 @@ unregister_red:
static void thingm_remove_rgb(struct thingm_rgb *rgb)
{
- flush_work(&rgb->work);
led_classdev_unregister(&rgb->red.ldev);
led_classdev_unregister(&rgb->green.ldev);
led_classdev_unregister(&rgb->blue.ldev);
+ flush_work(&rgb->work);
}
static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -250,6 +250,7 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!tdev->fwinfo) {
hid_err(hdev, "unsupported firmware %c\n", tdev->version.major);
+ err = -ENODEV;
goto stop;
}
@@ -286,10 +287,10 @@ static void thingm_remove(struct hid_device *hdev)
struct thingm_device *tdev = hid_get_drvdata(hdev);
int i;
+ hid_hw_stop(hdev);
+
for (i = 0; i < tdev->fwinfo->numrgb; ++i)
thingm_remove_rgb(tdev->rgb + i);
-
- hid_hw_stop(hdev);
}
static const struct hid_device_id thingm_table[] = {
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 0cb92e347258..e094c572b86e 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -44,10 +44,12 @@ struct uhid_device {
__u8 tail;
struct uhid_event *outq[UHID_BUFSIZE];
+ /* blocking GET_REPORT support; state changes protected by qlock */
struct mutex report_lock;
wait_queue_head_t report_wait;
- atomic_t report_done;
- atomic_t report_id;
+ bool report_running;
+ u32 report_id;
+ u32 report_type;
struct uhid_event report_buf;
};
@@ -90,8 +92,27 @@ static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
static int uhid_hid_start(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
+ struct uhid_event *ev;
+ unsigned long flags;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = UHID_START;
- return uhid_queue_event(uhid, UHID_START);
+ if (hid->report_enum[HID_FEATURE_REPORT].numbered)
+ ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
+ if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
+ ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
+ if (hid->report_enum[HID_INPUT_REPORT].numbered)
+ ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return 0;
}
static void uhid_hid_stop(struct hid_device *hid)
@@ -123,87 +144,169 @@ static int uhid_hid_parse(struct hid_device *hid)
return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}
-static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
- __u8 *buf, size_t count, unsigned char rtype)
+/* must be called with report_lock held */
+static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
+ struct uhid_event *ev,
+ __u32 *report_id)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ *report_id = ++uhid->report_id;
+ uhid->report_type = ev->type + 1;
+ uhid->report_running = true;
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ ret = wait_event_interruptible_timeout(uhid->report_wait,
+ !uhid->report_running || !uhid->running,
+ 5 * HZ);
+ if (!ret || !uhid->running || uhid->report_running)
+ ret = -EIO;
+ else if (ret < 0)
+ ret = -ERESTARTSYS;
+ else
+ ret = 0;
+
+ uhid->report_running = false;
+
+ return ret;
+}
+
+static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
+ const struct uhid_event *ev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+
+ /* id for old report; drop it silently */
+ if (uhid->report_type != ev->type || uhid->report_id != id)
+ goto unlock;
+ if (!uhid->report_running)
+ goto unlock;
+
+ memcpy(&uhid->report_buf, ev, sizeof(*ev));
+ uhid->report_running = false;
+ wake_up_interruptible(&uhid->report_wait);
+
+unlock:
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+}
+
+static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
+ u8 *buf, size_t count, u8 rtype)
{
struct uhid_device *uhid = hid->driver_data;
- __u8 report_type;
+ struct uhid_get_report_reply_req *req;
struct uhid_event *ev;
- unsigned long flags;
int ret;
- size_t uninitialized_var(len);
- struct uhid_feature_answer_req *req;
if (!uhid->running)
return -EIO;
- switch (rtype) {
- case HID_FEATURE_REPORT:
- report_type = UHID_FEATURE_REPORT;
- break;
- case HID_OUTPUT_REPORT:
- report_type = UHID_OUTPUT_REPORT;
- break;
- case HID_INPUT_REPORT:
- report_type = UHID_INPUT_REPORT;
- break;
- default:
- return -EINVAL;
- }
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = UHID_GET_REPORT;
+ ev->u.get_report.rnum = rnum;
+ ev->u.get_report.rtype = rtype;
ret = mutex_lock_interruptible(&uhid->report_lock);
- if (ret)
+ if (ret) {
+ kfree(ev);
return ret;
+ }
- ev = kzalloc(sizeof(*ev), GFP_KERNEL);
- if (!ev) {
- ret = -ENOMEM;
+ /* this _always_ takes ownership of @ev */
+ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
+ if (ret)
goto unlock;
+
+ req = &uhid->report_buf.u.get_report_reply;
+ if (req->err) {
+ ret = -EIO;
+ } else {
+ ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
+ memcpy(buf, req->data, ret);
}
- spin_lock_irqsave(&uhid->qlock, flags);
- ev->type = UHID_FEATURE;
- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
- ev->u.feature.rnum = rnum;
- ev->u.feature.rtype = report_type;
+unlock:
+ mutex_unlock(&uhid->report_lock);
+ return ret;
+}
- atomic_set(&uhid->report_done, 0);
- uhid_queue(uhid, ev);
- spin_unlock_irqrestore(&uhid->qlock, flags);
+static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
+ const u8 *buf, size_t count, u8 rtype)
+{
+ struct uhid_device *uhid = hid->driver_data;
+ struct uhid_event *ev;
+ int ret;
- ret = wait_event_interruptible_timeout(uhid->report_wait,
- atomic_read(&uhid->report_done), 5 * HZ);
-
- /*
- * Make sure "uhid->running" is cleared on shutdown before
- * "uhid->report_done" is set.
- */
- smp_rmb();
- if (!ret || !uhid->running) {
- ret = -EIO;
- } else if (ret < 0) {
- ret = -ERESTARTSYS;
- } else {
- spin_lock_irqsave(&uhid->qlock, flags);
- req = &uhid->report_buf.u.feature_answer;
+ if (!uhid->running || count > UHID_DATA_MAX)
+ return -EIO;
- if (req->err) {
- ret = -EIO;
- } else {
- ret = 0;
- len = min(count,
- min_t(size_t, req->size, UHID_DATA_MAX));
- memcpy(buf, req->data, len);
- }
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = UHID_SET_REPORT;
+ ev->u.set_report.rnum = rnum;
+ ev->u.set_report.rtype = rtype;
+ ev->u.set_report.size = count;
+ memcpy(ev->u.set_report.data, buf, count);
- spin_unlock_irqrestore(&uhid->qlock, flags);
+ ret = mutex_lock_interruptible(&uhid->report_lock);
+ if (ret) {
+ kfree(ev);
+ return ret;
}
- atomic_set(&uhid->report_done, 1);
+ /* this _always_ takes ownership of @ev */
+ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
+ if (ret)
+ goto unlock;
+
+ if (uhid->report_buf.u.set_report_reply.err)
+ ret = -EIO;
+ else
+ ret = count;
unlock:
mutex_unlock(&uhid->report_lock);
- return ret ? ret : len;
+ return ret;
+}
+
+static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ u8 u_rtype;
+
+ switch (rtype) {
+ case HID_FEATURE_REPORT:
+ u_rtype = UHID_FEATURE_REPORT;
+ break;
+ case HID_OUTPUT_REPORT:
+ u_rtype = UHID_OUTPUT_REPORT;
+ break;
+ case HID_INPUT_REPORT:
+ u_rtype = UHID_INPUT_REPORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
+ case HID_REQ_SET_REPORT:
+ return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
+ default:
+ return -EIO;
+ }
}
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
@@ -250,29 +353,14 @@ static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}
-static int uhid_raw_request(struct hid_device *hid, unsigned char reportnum,
- __u8 *buf, size_t len, unsigned char rtype,
- int reqtype)
-{
- switch (reqtype) {
- case HID_REQ_GET_REPORT:
- return uhid_hid_get_raw(hid, reportnum, buf, len, rtype);
- case HID_REQ_SET_REPORT:
- /* TODO: implement proper SET_REPORT functionality */
- return -ENOSYS;
- default:
- return -EIO;
- }
-}
-
static struct hid_ll_driver uhid_hid_driver = {
.start = uhid_hid_start,
.stop = uhid_hid_stop,
.open = uhid_hid_open,
.close = uhid_hid_close,
.parse = uhid_hid_parse,
+ .raw_request = uhid_hid_raw_request,
.output_report = uhid_hid_output_report,
- .raw_request = uhid_raw_request,
};
#ifdef CONFIG_COMPAT
@@ -363,28 +451,27 @@ static int uhid_event_from_user(const char __user *buffer, size_t len,
}
#endif
-static int uhid_dev_create(struct uhid_device *uhid,
- const struct uhid_event *ev)
+static int uhid_dev_create2(struct uhid_device *uhid,
+ const struct uhid_event *ev)
{
struct hid_device *hid;
+ size_t rd_size, len;
+ void *rd_data;
int ret;
if (uhid->running)
return -EALREADY;
- uhid->rd_size = ev->u.create.rd_size;
- if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
+ rd_size = ev->u.create2.rd_size;
+ if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
return -EINVAL;
- uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL);
- if (!uhid->rd_data)
+ rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
+ if (!rd_data)
return -ENOMEM;
- if (copy_from_user(uhid->rd_data, ev->u.create.rd_data,
- uhid->rd_size)) {
- ret = -EFAULT;
- goto err_free;
- }
+ uhid->rd_size = rd_size;
+ uhid->rd_data = rd_data;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
@@ -392,19 +479,19 @@ static int uhid_dev_create(struct uhid_device *uhid,
goto err_free;
}
- strncpy(hid->name, ev->u.create.name, 127);
- hid->name[127] = 0;
- strncpy(hid->phys, ev->u.create.phys, 63);
- hid->phys[63] = 0;
- strncpy(hid->uniq, ev->u.create.uniq, 63);
- hid->uniq[63] = 0;
+ len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
+ strncpy(hid->name, ev->u.create2.name, len);
+ len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
+ strncpy(hid->phys, ev->u.create2.phys, len);
+ len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
+ strncpy(hid->uniq, ev->u.create2.uniq, len);
hid->ll_driver = &uhid_hid_driver;
- hid->bus = ev->u.create.bus;
- hid->vendor = ev->u.create.vendor;
- hid->product = ev->u.create.product;
- hid->version = ev->u.create.version;
- hid->country = ev->u.create.country;
+ hid->bus = ev->u.create2.bus;
+ hid->vendor = ev->u.create2.vendor;
+ hid->product = ev->u.create2.product;
+ hid->version = ev->u.create2.version;
+ hid->country = ev->u.create2.country;
hid->driver_data = uhid;
hid->dev.parent = uhid_misc.this_device;
@@ -425,67 +512,34 @@ err_hid:
uhid->running = false;
err_free:
kfree(uhid->rd_data);
+ uhid->rd_data = NULL;
+ uhid->rd_size = 0;
return ret;
}
-static int uhid_dev_create2(struct uhid_device *uhid,
- const struct uhid_event *ev)
+static int uhid_dev_create(struct uhid_device *uhid,
+ struct uhid_event *ev)
{
- struct hid_device *hid;
- int ret;
+ struct uhid_create_req orig;
- if (uhid->running)
- return -EALREADY;
+ orig = ev->u.create;
- uhid->rd_size = ev->u.create2.rd_size;
- if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
+ if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
return -EINVAL;
+ if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
+ return -EFAULT;
- uhid->rd_data = kmemdup(ev->u.create2.rd_data, uhid->rd_size,
- GFP_KERNEL);
- if (!uhid->rd_data)
- return -ENOMEM;
-
- hid = hid_allocate_device();
- if (IS_ERR(hid)) {
- ret = PTR_ERR(hid);
- goto err_free;
- }
-
- strncpy(hid->name, ev->u.create2.name, 127);
- hid->name[127] = 0;
- strncpy(hid->phys, ev->u.create2.phys, 63);
- hid->phys[63] = 0;
- strncpy(hid->uniq, ev->u.create2.uniq, 63);
- hid->uniq[63] = 0;
-
- hid->ll_driver = &uhid_hid_driver;
- hid->bus = ev->u.create2.bus;
- hid->vendor = ev->u.create2.vendor;
- hid->product = ev->u.create2.product;
- hid->version = ev->u.create2.version;
- hid->country = ev->u.create2.country;
- hid->driver_data = uhid;
- hid->dev.parent = uhid_misc.this_device;
-
- uhid->hid = hid;
- uhid->running = true;
-
- ret = hid_add_device(hid);
- if (ret) {
- hid_err(hid, "Cannot register HID device\n");
- goto err_hid;
- }
-
- return 0;
-
-err_hid:
- hid_destroy_device(hid);
- uhid->hid = NULL;
- uhid->running = false;
-err_free:
- kfree(uhid->rd_data);
- return ret;
+ memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
+ memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
+ memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
+ ev->u.create2.rd_size = orig.rd_size;
+ ev->u.create2.bus = orig.bus;
+ ev->u.create2.vendor = orig.vendor;
+ ev->u.create2.product = orig.product;
+ ev->u.create2.version = orig.version;
+ ev->u.create2.country = orig.country;
+
+ return uhid_dev_create2(uhid, ev);
}
static int uhid_dev_destroy(struct uhid_device *uhid)
@@ -493,10 +547,7 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
if (!uhid->running)
return -EINVAL;
- /* clear "running" before setting "report_done" */
uhid->running = false;
- smp_wmb();
- atomic_set(&uhid->report_done, 1);
wake_up_interruptible(&uhid->report_wait);
hid_destroy_device(uhid->hid);
@@ -527,28 +578,23 @@ static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
return 0;
}
-static int uhid_dev_feature_answer(struct uhid_device *uhid,
- struct uhid_event *ev)
+static int uhid_dev_get_report_reply(struct uhid_device *uhid,
+ struct uhid_event *ev)
{
- unsigned long flags;
-
if (!uhid->running)
return -EINVAL;
- spin_lock_irqsave(&uhid->qlock, flags);
-
- /* id for old report; drop it silently */
- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
- goto unlock;
- if (atomic_read(&uhid->report_done))
- goto unlock;
+ uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
+ return 0;
+}
- memcpy(&uhid->report_buf, ev, sizeof(*ev));
- atomic_set(&uhid->report_done, 1);
- wake_up_interruptible(&uhid->report_wait);
+static int uhid_dev_set_report_reply(struct uhid_device *uhid,
+ struct uhid_event *ev)
+{
+ if (!uhid->running)
+ return -EINVAL;
-unlock:
- spin_unlock_irqrestore(&uhid->qlock, flags);
+ uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
return 0;
}
@@ -566,7 +612,6 @@ static int uhid_char_open(struct inode *inode, struct file *file)
init_waitqueue_head(&uhid->waitq);
init_waitqueue_head(&uhid->report_wait);
uhid->running = false;
- atomic_set(&uhid->report_done, 1);
file->private_data = uhid;
nonseekable_open(inode, file);
@@ -675,8 +720,11 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
case UHID_INPUT2:
ret = uhid_dev_input2(uhid, &uhid->input_buf);
break;
- case UHID_FEATURE_ANSWER:
- ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
+ case UHID_GET_REPORT_REPLY:
+ ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
+ break;
+ case UHID_SET_REPORT_REPLY:
+ ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
break;
default:
ret = -EOPNOTSUPP;
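
The raw_request rework above turns GET_REPORT into a blocking round trip: the kernel queues a UHID_GET_REPORT event with a fresh id and waits up to five seconds for user space to write back a UHID_GET_REPORT_REPLY carrying the same id. Below is a minimal user-space sketch of that reply path, assuming an already-open /dev/uhid descriptor and a hypothetical fetch_report() helper that produces the report data; neither is part of this patch.

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <linux/uhid.h>

/* Answer one UHID_GET_REPORT request read from @fd. fetch_report() is a
 * hypothetical callback that fills @buf (at most @len bytes) and returns
 * the report length, or a negative value on failure. */
static int answer_get_report(int fd, const struct uhid_event *req,
			     ssize_t (*fetch_report)(__u8 rtype, __u8 rnum,
						     __u8 *buf, size_t len))
{
	struct uhid_event reply;
	ssize_t len;

	memset(&reply, 0, sizeof(reply));
	reply.type = UHID_GET_REPORT_REPLY;
	/* echo the id unchanged; a stale id is dropped silently by the kernel */
	reply.u.get_report_reply.id = req->u.get_report.id;

	len = fetch_report(req->u.get_report.rtype, req->u.get_report.rnum,
			   reply.u.get_report_reply.data, UHID_DATA_MAX);
	if (len < 0)
		reply.u.get_report_reply.err = EIO; /* mapped to -EIO above */
	else
		reply.u.get_report_reply.size = len;

	return write(fd, &reply, sizeof(reply)) == sizeof(reply) ? 0 : -1;
}

If user space never answers, the wait_event_interruptible_timeout() above expires and the in-kernel caller sees -EIO, so replies should be sent promptly and must echo the request id.
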
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 79cf503e37bf..ca6849a0121e 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data;
spin_lock_irqsave(&usbhid->lock, flags);
- if (hid->open > 0 &&
+ if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) &&
!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
!test_bit(HID_SUSPENDED, &usbhid->iofl) &&
!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
@@ -116,40 +116,24 @@ static void hid_reset(struct work_struct *work)
struct usbhid_device *usbhid =
container_of(work, struct usbhid_device, reset_work);
struct hid_device *hid = usbhid->hid;
- int rc = 0;
+ int rc;
if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) {
dev_dbg(&usbhid->intf->dev, "clear halt\n");
rc = usb_clear_halt(hid_to_usb_dev(hid), usbhid->urbin->pipe);
clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
- hid_start_in(hid);
- }
-
- else if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) {
- dev_dbg(&usbhid->intf->dev, "resetting device\n");
- rc = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf);
if (rc == 0) {
- rc = usb_reset_device(hid_to_usb_dev(hid));
- usb_unlock_device(hid_to_usb_dev(hid));
+ hid_start_in(hid);
+ } else {
+ dev_dbg(&usbhid->intf->dev,
+ "clear-halt failed: %d\n", rc);
+ set_bit(HID_RESET_PENDING, &usbhid->iofl);
}
- clear_bit(HID_RESET_PENDING, &usbhid->iofl);
}
- switch (rc) {
- case 0:
- if (!test_bit(HID_IN_RUNNING, &usbhid->iofl))
- hid_io_error(hid);
- break;
- default:
- hid_err(hid, "can't reset device, %s-%s/input%d, status %d\n",
- hid_to_usb_dev(hid)->bus->bus_name,
- hid_to_usb_dev(hid)->devpath,
- usbhid->ifnum, rc);
- /* FALLTHROUGH */
- case -EHOSTUNREACH:
- case -ENODEV:
- case -EINTR:
- break;
+ if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) {
+ dev_dbg(&usbhid->intf->dev, "resetting device\n");
+ usb_queue_reset_device(usbhid->intf);
}
}
@@ -292,6 +276,8 @@ static void hid_irq_in(struct urb *urb)
case 0: /* success */
usbhid_mark_busy(usbhid);
usbhid->retry_delay = 0;
+ if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
+ break;
hid_input_report(urb->context, HID_INPUT_REPORT,
urb->transfer_buffer,
urb->actual_length, 1);
@@ -735,8 +721,10 @@ void usbhid_close(struct hid_device *hid)
if (!--hid->open) {
spin_unlock_irq(&usbhid->lock);
hid_cancel_delayed_stuff(usbhid);
- usb_kill_urb(usbhid->urbin);
- usbhid->intf->needs_remote_wakeup = 0;
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+ usb_kill_urb(usbhid->urbin);
+ usbhid->intf->needs_remote_wakeup = 0;
+ }
} else {
spin_unlock_irq(&usbhid->lock);
}
@@ -1134,6 +1122,19 @@ static int usbhid_start(struct hid_device *hid)
set_bit(HID_STARTED, &usbhid->iofl);
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+ ret = usb_autopm_get_interface(usbhid->intf);
+ if (ret)
+ goto fail;
+ usbhid->intf->needs_remote_wakeup = 1;
+ ret = hid_start_in(hid);
+ if (ret) {
+ dev_err(&hid->dev,
+ "failed to start in urb: %d\n", ret);
+ }
+ usb_autopm_put_interface(usbhid->intf);
+ }
+
/* Some keyboards don't work until their LEDs have been set.
* Since BIOSes do set the LEDs, it must be safe for any device
* that supports the keyboard boot protocol.
@@ -1166,6 +1167,9 @@ static void usbhid_stop(struct hid_device *hid)
if (WARN_ON(!usbhid))
return;
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
+ usbhid->intf->needs_remote_wakeup = 0;
+
clear_bit(HID_STARTED, &usbhid->iofl);
spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 15225f3eaed1..f3cb5b0a4345 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
@@ -79,6 +80,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 64bc1b296d91..0cc53440543a 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -89,6 +89,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/hid.h>
#include <linux/usb/input.h>
#include <linux/power_supply.h>
#include <asm/unaligned.h>
@@ -143,4 +144,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
+void wacom_wac_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage);
+int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value);
+void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
#endif
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index f0db7eca9023..8593047bb726 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -13,23 +13,26 @@
#include "wacom_wac.h"
#include "wacom.h"
-#include <linux/hid.h>
#define WAC_MSG_RETRIES 5
+#define WAC_CMD_WL_LED_CONTROL 0x03
#define WAC_CMD_LED_CONTROL 0x20
#define WAC_CMD_ICON_START 0x21
#define WAC_CMD_ICON_XFER 0x23
#define WAC_CMD_ICON_BT_XFER 0x26
#define WAC_CMD_RETRIES 10
-static int wacom_get_report(struct hid_device *hdev, u8 type, u8 id,
- void *buf, size_t size, unsigned int retries)
+#define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP)
+#define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP)
+
+static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf,
+ size_t size, unsigned int retries)
{
int retval;
do {
- retval = hid_hw_raw_request(hdev, id, buf, size, type,
+ retval = hid_hw_raw_request(hdev, buf[0], buf, size, type,
HID_REQ_GET_REPORT);
} while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
@@ -106,12 +109,35 @@ static void wacom_feature_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_features *features = &wacom->wacom_wac.features;
+ struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
+ u8 *data;
+ int ret;
switch (usage->hid) {
case HID_DG_CONTACTMAX:
/* leave touch_max as is if predefined */
- if (!features->touch_max)
- features->touch_max = field->value[0];
+ if (!features->touch_max) {
+ /* read manually */
+ data = kzalloc(2, GFP_KERNEL);
+ if (!data)
+ break;
+ data[0] = field->report->id;
+ ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+ data, 2, 0);
+ if (ret == 2)
+ features->touch_max = data[1];
+ kfree(data);
+ }
+ break;
+ case HID_DG_INPUTMODE:
+ /* Ignore if value index is out of bounds. */
+ if (usage->usage_index >= field->report_count) {
+ dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
+ break;
+ }
+
+ hid_data->inputmode = field->report->id;
+ hid_data->inputmode_index = usage->usage_index;
break;
}
}
@@ -199,6 +225,9 @@ static void wacom_usage_mapping(struct hid_device *hdev,
features->pressure_max = field->logical_maximum;
break;
}
+
+ if (features->type == HID_GENERIC)
+ wacom_wac_usage_mapping(hdev, field, usage);
}
static void wacom_parse_hid(struct hid_device *hdev,
@@ -237,6 +266,25 @@ static void wacom_parse_hid(struct hid_device *hdev,
}
}
+static int wacom_hid_set_device_mode(struct hid_device *hdev)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
+ struct hid_report *r;
+ struct hid_report_enum *re;
+
+ if (hid_data->inputmode < 0)
+ return 0;
+
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[hid_data->inputmode];
+ if (r) {
+ r->field[0]->value[hid_data->inputmode_index] = 2;
+ hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
+ }
+ return 0;
+}
+
static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
int length, int mode)
{
@@ -255,7 +303,7 @@ static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
length, 1);
if (error >= 0)
error = wacom_get_report(hdev, HID_FEATURE_REPORT,
- report_id, rep_data, length, 1);
+ rep_data, length, 1);
} while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -329,6 +377,9 @@ static int wacom_query_tablet_data(struct hid_device *hdev,
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
+ if (features->type == HID_GENERIC)
+ return wacom_hid_set_device_mode(hdev);
+
if (features->device_type == BTN_TOOL_FINGER) {
if (features->type > TABLETPC) {
/* MT Tablet PC touch */
@@ -487,8 +538,14 @@ static int wacom_led_control(struct wacom *wacom)
{
unsigned char *buf;
int retval;
+ unsigned char report_id = WAC_CMD_LED_CONTROL;
+ int buf_size = 9;
- buf = kzalloc(9, GFP_KERNEL);
+ if (wacom->wacom_wac.pid) { /* wireless connected */
+ report_id = WAC_CMD_WL_LED_CONTROL;
+ buf_size = 13;
+ }
+ buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -502,9 +559,16 @@ static int wacom_led_control(struct wacom *wacom)
int ring_led = wacom->led.select[0] & 0x03;
int ring_lum = (((wacom->led.llv & 0x60) >> 5) - 1) & 0x03;
int crop_lum = 0;
-
- buf[0] = WAC_CMD_LED_CONTROL;
- buf[1] = (crop_lum << 4) | (ring_lum << 2) | (ring_led);
+ unsigned char led_bits = (crop_lum << 4) | (ring_lum << 2) | (ring_led);
+
+ buf[0] = report_id;
+ if (wacom->wacom_wac.pid) {
+ wacom_get_report(wacom->hdev, HID_FEATURE_REPORT,
+ buf, buf_size, WAC_CMD_RETRIES);
+ buf[0] = report_id;
+ buf[4] = led_bits;
+ } else
+ buf[1] = led_bits;
}
else {
int led = wacom->led.select[0] | 0x4;
@@ -513,14 +577,14 @@ static int wacom_led_control(struct wacom *wacom)
wacom->wacom_wac.features.type == WACOM_24HD)
led |= (wacom->led.select[1] << 4) | 0x40;
- buf[0] = WAC_CMD_LED_CONTROL;
+ buf[0] = report_id;
buf[1] = led;
buf[2] = wacom->led.llv;
buf[3] = wacom->led.hlv;
buf[4] = wacom->led.img_lum;
}
- retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 9,
+ retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, buf_size,
WAC_CMD_RETRIES);
kfree(buf);
@@ -602,9 +666,10 @@ static ssize_t wacom_led##SET_ID##_select_show(struct device *dev, \
{ \
struct hid_device *hdev = container_of(dev, struct hid_device, dev);\
struct wacom *wacom = hid_get_drvdata(hdev); \
- return snprintf(buf, 2, "%d\n", wacom->led.select[SET_ID]); \
+ return scnprintf(buf, PAGE_SIZE, "%d\n", \
+ wacom->led.select[SET_ID]); \
} \
-static DEVICE_ATTR(status_led##SET_ID##_select, S_IWUSR | S_IRUSR, \
+static DEVICE_ATTR(status_led##SET_ID##_select, DEV_ATTR_RW_PERM, \
wacom_led##SET_ID##_select_show, \
wacom_led##SET_ID##_select_store)
@@ -641,8 +706,15 @@ static ssize_t wacom_##name##_luminance_store(struct device *dev, \
return wacom_luminance_store(wacom, &wacom->led.field, \
buf, count); \
} \
-static DEVICE_ATTR(name##_luminance, S_IWUSR, \
- NULL, wacom_##name##_luminance_store)
+static ssize_t wacom_##name##_luminance_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wacom *wacom = dev_get_drvdata(dev); \
+ return scnprintf(buf, PAGE_SIZE, "%d\n", wacom->led.field); \
+} \
+static DEVICE_ATTR(name##_luminance, DEV_ATTR_RW_PERM, \
+ wacom_##name##_luminance_show, \
+ wacom_##name##_luminance_store)
DEVICE_LUMINANCE_ATTR(status0, llv);
DEVICE_LUMINANCE_ATTR(status1, hlv);
@@ -683,7 +755,7 @@ static ssize_t wacom_btnimg##BUTTON_ID##_store(struct device *dev, \
{ \
return wacom_button_image_store(dev, BUTTON_ID, buf, count); \
} \
-static DEVICE_ATTR(button##BUTTON_ID##_rawimg, S_IWUSR, \
+static DEVICE_ATTR(button##BUTTON_ID##_rawimg, DEV_ATTR_WO_PERM, \
NULL, wacom_btnimg##BUTTON_ID##_store)
DEVICE_BTNIMG_ATTR(0);
@@ -989,7 +1061,7 @@ static ssize_t wacom_store_speed(struct device *dev,
return count;
}
-static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
+static DEVICE_ATTR(speed, DEV_ATTR_RW_PERM,
wacom_show_speed, wacom_store_speed);
static struct input_dev *wacom_allocate_input(struct wacom *wacom)
@@ -1010,47 +1082,82 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom)
input_dev->uniq = hdev->uniq;
input_dev->id.bustype = hdev->bus;
input_dev->id.vendor = hdev->vendor;
- input_dev->id.product = hdev->product;
+ input_dev->id.product = wacom_wac->pid ? wacom_wac->pid : hdev->product;
input_dev->id.version = hdev->version;
input_set_drvdata(input_dev, wacom);
return input_dev;
}
-static void wacom_unregister_inputs(struct wacom *wacom)
+static void wacom_free_inputs(struct wacom *wacom)
{
- if (wacom->wacom_wac.input)
- input_unregister_device(wacom->wacom_wac.input);
- if (wacom->wacom_wac.pad_input)
- input_unregister_device(wacom->wacom_wac.pad_input);
- wacom->wacom_wac.input = NULL;
- wacom->wacom_wac.pad_input = NULL;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+
+ if (wacom_wac->input)
+ input_free_device(wacom_wac->input);
+ if (wacom_wac->pad_input)
+ input_free_device(wacom_wac->pad_input);
+ wacom_wac->input = NULL;
+ wacom_wac->pad_input = NULL;
}
-static int wacom_register_inputs(struct wacom *wacom)
+static int wacom_allocate_inputs(struct wacom *wacom)
{
struct input_dev *input_dev, *pad_input_dev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
- int error;
input_dev = wacom_allocate_input(wacom);
pad_input_dev = wacom_allocate_input(wacom);
if (!input_dev || !pad_input_dev) {
- error = -ENOMEM;
- goto fail1;
+ wacom_free_inputs(wacom);
+ return -ENOMEM;
}
wacom_wac->input = input_dev;
wacom_wac->pad_input = pad_input_dev;
wacom_wac->pad_input->name = wacom_wac->pad_name;
+ return 0;
+}
+
+static void wacom_clean_inputs(struct wacom *wacom)
+{
+ if (wacom->wacom_wac.input) {
+ if (wacom->wacom_wac.input_registered)
+ input_unregister_device(wacom->wacom_wac.input);
+ else
+ input_free_device(wacom->wacom_wac.input);
+ }
+ if (wacom->wacom_wac.pad_input) {
+ if (wacom->wacom_wac.input_registered)
+ input_unregister_device(wacom->wacom_wac.pad_input);
+ else
+ input_free_device(wacom->wacom_wac.pad_input);
+ }
+ wacom->wacom_wac.input = NULL;
+ wacom->wacom_wac.pad_input = NULL;
+ wacom_destroy_leds(wacom);
+}
+
+static int wacom_register_inputs(struct wacom *wacom)
+{
+ struct input_dev *input_dev, *pad_input_dev;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int error;
+
+ input_dev = wacom_wac->input;
+ pad_input_dev = wacom_wac->pad_input;
+
+ if (!input_dev || !pad_input_dev)
+ return -EINVAL;
+
error = wacom_setup_input_capabilities(input_dev, wacom_wac);
if (error)
- goto fail2;
+ return error;
error = input_register_device(input_dev);
if (error)
- goto fail2;
+ return error;
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
if (error) {
@@ -1061,22 +1168,23 @@ static int wacom_register_inputs(struct wacom *wacom)
} else {
error = input_register_device(pad_input_dev);
if (error)
- goto fail3;
+ goto fail_register_pad_input;
+
+ error = wacom_initialize_leds(wacom);
+ if (error)
+ goto fail_leds;
}
+ wacom_wac->input_registered = true;
+
return 0;
-fail3:
+fail_leds:
+ input_unregister_device(pad_input_dev);
+ pad_input_dev = NULL;
+fail_register_pad_input:
input_unregister_device(input_dev);
- input_dev = NULL;
-fail2:
wacom_wac->input = NULL;
- wacom_wac->pad_input = NULL;
-fail1:
- if (input_dev)
- input_free_device(input_dev);
- if (pad_input_dev)
- input_free_device(pad_input_dev);
return error;
}
@@ -1101,13 +1209,13 @@ static void wacom_wireless_work(struct work_struct *work)
hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
wacom1 = hid_get_drvdata(hdev1);
wacom_wac1 = &(wacom1->wacom_wac);
- wacom_unregister_inputs(wacom1);
+ wacom_clean_inputs(wacom1);
/* Touch interface */
hdev2 = usb_get_intfdata(usbdev->config->interface[2]);
wacom2 = hid_get_drvdata(hdev2);
wacom_wac2 = &(wacom2->wacom_wac);
- wacom_unregister_inputs(wacom2);
+ wacom_clean_inputs(wacom2);
if (wacom_wac->pid == 0) {
hid_info(wacom->hdev, "wireless tablet disconnected\n");
@@ -1140,7 +1248,9 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac1->features.name);
wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max;
wacom_wac1->shared->type = wacom_wac1->features.type;
- error = wacom_register_inputs(wacom1);
+ wacom_wac1->pid = wacom_wac->pid;
+ error = wacom_allocate_inputs(wacom1) ||
+ wacom_register_inputs(wacom1);
if (error)
goto fail;
@@ -1160,7 +1270,9 @@ static void wacom_wireless_work(struct work_struct *work)
"%s (WL) Pad",wacom_wac2->features.name);
snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
"%s (WL) Pad", wacom_wac2->features.name);
- error = wacom_register_inputs(wacom2);
+ wacom_wac2->pid = wacom_wac->pid;
+ error = wacom_allocate_inputs(wacom2) ||
+ wacom_register_inputs(wacom2);
if (error)
goto fail;
@@ -1177,8 +1289,8 @@ static void wacom_wireless_work(struct work_struct *work)
return;
fail:
- wacom_unregister_inputs(wacom1);
- wacom_unregister_inputs(wacom2);
+ wacom_clean_inputs(wacom1);
+ wacom_clean_inputs(wacom2);
return;
}
@@ -1241,10 +1353,13 @@ static int wacom_probe(struct hid_device *hdev,
struct wacom_wac *wacom_wac;
struct wacom_features *features;
int error;
+ unsigned int connect_mask = HID_CONNECT_HIDRAW;
if (!id->driver_data)
return -EINVAL;
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
if (!wacom)
return -ENOMEM;
@@ -1256,7 +1371,7 @@ static int wacom_probe(struct hid_device *hdev,
error = hid_parse(hdev);
if (error) {
hid_err(hdev, "parse failed\n");
- goto fail1;
+ goto fail_parse;
}
wacom_wac = &wacom->wacom_wac;
@@ -1265,12 +1380,12 @@ static int wacom_probe(struct hid_device *hdev,
features->pktlen = wacom_compute_pktlen(hdev);
if (features->pktlen > WACOM_PKGLEN_MAX) {
error = -EINVAL;
- goto fail1;
+ goto fail_pktlen;
}
if (features->check_for_hid_type && features->hid_type != hdev->type) {
error = -ENODEV;
- goto fail1;
+ goto fail_type;
}
wacom->usbdev = dev;
@@ -1278,6 +1393,12 @@ static int wacom_probe(struct hid_device *hdev,
mutex_init(&wacom->lock);
INIT_WORK(&wacom->work, wacom_wireless_work);
+ if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
+ error = wacom_allocate_inputs(wacom);
+ if (error)
+ goto fail_allocate_inputs;
+ }
+
/* set the default size in case we do not get them from hid */
wacom_set_default_phy(features);
@@ -1339,24 +1460,20 @@ static int wacom_probe(struct hid_device *hdev,
error = wacom_add_shared_data(hdev);
if (error)
- goto fail1;
+ goto fail_shared_data;
}
- error = wacom_initialize_leds(wacom);
- if (error)
- goto fail2;
-
if (!(features->quirks & WACOM_QUIRK_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
error = wacom_initialize_battery(wacom);
if (error)
- goto fail3;
+ goto fail_battery;
}
if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
error = wacom_register_inputs(wacom);
if (error)
- goto fail4;
+ goto fail_register_inputs;
}
if (hdev->bus == BUS_BLUETOOTH) {
@@ -1367,16 +1484,19 @@ static int wacom_probe(struct hid_device *hdev,
error);
}
- /* Note that if query fails it is not a hard failure */
- wacom_query_tablet_data(hdev, features);
+ if (features->type == HID_GENERIC)
+ connect_mask |= HID_CONNECT_DRIVER;
/* Regular HID work starts now */
- error = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ error = hid_hw_start(hdev, connect_mask);
if (error) {
hid_err(hdev, "hw start failed\n");
- goto fail5;
+ goto fail_hw_start;
}
+ /* Note that if query fails it is not a hard failure */
+ wacom_query_tablet_data(hdev, features);
+
if (features->quirks & WACOM_QUIRK_MONITOR)
error = hid_hw_open(hdev);
@@ -1387,13 +1507,21 @@ static int wacom_probe(struct hid_device *hdev,
return 0;
- fail5: if (hdev->bus == BUS_BLUETOOTH)
+fail_hw_start:
+ if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
- wacom_unregister_inputs(wacom);
- fail4: wacom_destroy_battery(wacom);
- fail3: wacom_destroy_leds(wacom);
- fail2: wacom_remove_shared_data(wacom_wac);
- fail1: kfree(wacom);
+fail_register_inputs:
+ wacom_clean_inputs(wacom);
+ wacom_destroy_battery(wacom);
+fail_battery:
+ wacom_remove_shared_data(wacom_wac);
+fail_shared_data:
+ wacom_clean_inputs(wacom);
+fail_allocate_inputs:
+fail_type:
+fail_pktlen:
+fail_parse:
+ kfree(wacom);
hid_set_drvdata(hdev, NULL);
return error;
}
@@ -1405,11 +1533,10 @@ static void wacom_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
cancel_work_sync(&wacom->work);
- wacom_unregister_inputs(wacom);
+ wacom_clean_inputs(wacom);
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
wacom_destroy_battery(wacom);
- wacom_destroy_leds(wacom);
wacom_remove_shared_data(&wacom->wacom_wac);
hid_set_drvdata(hdev, NULL);
@@ -1444,6 +1571,8 @@ static struct hid_driver wacom_driver = {
.id_table = wacom_ids,
.probe = wacom_probe,
.remove = wacom_remove,
+ .event = wacom_wac_event,
+ .report = wacom_wac_report,
#ifdef CONFIG_PM
.resume = wacom_resume,
.reset_resume = wacom_reset_resume,
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index aa6a08eb7ad6..586b2405b0d4 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1248,6 +1248,296 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static void wacom_map_usage(struct wacom *wacom, struct hid_usage *usage,
+ struct hid_field *field, __u8 type, __u16 code, int fuzz)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->input;
+ int fmin = field->logical_minimum;
+ int fmax = field->logical_maximum;
+
+ usage->type = type;
+ usage->code = code;
+
+ set_bit(type, input->evbit);
+
+ switch (type) {
+ case EV_ABS:
+ input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+ input_abs_set_res(input, code,
+ hidinput_calc_abs_res(field, code));
+ break;
+ case EV_KEY:
+ input_set_capability(input, EV_KEY, code);
+ break;
+ case EV_MSC:
+ input_set_capability(input, EV_MSC, code);
+ break;
+ }
+}
+
+static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+
+ switch (usage->hid) {
+ case HID_GD_X:
+ wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4);
+ break;
+ case HID_GD_Y:
+ wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4);
+ break;
+ case HID_DG_TIPPRESSURE:
+ wacom_map_usage(wacom, usage, field, EV_ABS, ABS_PRESSURE, 0);
+ break;
+ case HID_DG_INRANGE:
+ wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+ break;
+ case HID_DG_INVERT:
+ wacom_map_usage(wacom, usage, field, EV_KEY,
+ BTN_TOOL_RUBBER, 0);
+ break;
+ case HID_DG_ERASER:
+ case HID_DG_TIPSWITCH:
+ wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0);
+ break;
+ case HID_DG_BARRELSWITCH:
+ wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS, 0);
+ break;
+ case HID_DG_BARRELSWITCH2:
+ wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS2, 0);
+ break;
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_map_usage(wacom, usage, field, EV_MSC, MSC_SERIAL, 0);
+ break;
+ }
+}
+
+static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->input;
+
+ /* checking which Tool / tip switch to send */
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ wacom_wac->hid_data.inrange_state = value;
+ return 0;
+ case HID_DG_INVERT:
+ wacom_wac->hid_data.invert_state = value;
+ return 0;
+ case HID_DG_ERASER:
+ case HID_DG_TIPSWITCH:
+ wacom_wac->hid_data.tipswitch |= value;
+ return 0;
+ }
+
+ /* send pen events only when touch is up or forced out */
+ if (!usage->type || wacom_wac->shared->touch_down)
+ return 0;
+
+ input_event(input, usage->type, usage->code, value);
+
+ return 0;
+}
+
+static void wacom_wac_pen_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->input;
+ bool prox = wacom_wac->hid_data.inrange_state;
+
+ if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
+ /* Going into proximity select tool */
+ wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ?
+ BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+
+ /* keep pen state for touch events */
+ wacom_wac->shared->stylus_in_proximity = prox;
+
+ /* send pen events only when touch is up or forced out */
+ if (!wacom_wac->shared->touch_down) {
+ input_report_key(input, BTN_TOUCH,
+ wacom_wac->hid_data.tipswitch);
+ input_report_key(input, wacom_wac->tool[0], prox);
+
+ wacom_wac->hid_data.tipswitch = false;
+
+ input_sync(input);
+ }
+}
+
+static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->input;
+ unsigned touch_max = wacom_wac->features.touch_max;
+
+ switch (usage->hid) {
+ case HID_GD_X:
+ if (touch_max == 1)
+ wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4);
+ else
+ wacom_map_usage(wacom, usage, field, EV_ABS,
+ ABS_MT_POSITION_X, 4);
+ break;
+ case HID_GD_Y:
+ if (touch_max == 1)
+ wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4);
+ else
+ wacom_map_usage(wacom, usage, field, EV_ABS,
+ ABS_MT_POSITION_Y, 4);
+ break;
+ case HID_DG_CONTACTID:
+ input_mt_init_slots(input, wacom_wac->features.touch_max,
+ INPUT_MT_DIRECT);
+ break;
+ case HID_DG_INRANGE:
+ break;
+ case HID_DG_INVERT:
+ break;
+ case HID_DG_TIPSWITCH:
+ wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0);
+ break;
+ }
+}
+
+static int wacom_wac_finger_event(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ switch (usage->hid) {
+ case HID_GD_X:
+ wacom_wac->hid_data.x = value;
+ break;
+ case HID_GD_Y:
+ wacom_wac->hid_data.y = value;
+ break;
+ case HID_DG_CONTACTID:
+ wacom_wac->hid_data.id = value;
+ break;
+ case HID_DG_TIPSWITCH:
+ wacom_wac->hid_data.tipswitch = value;
+ break;
+ }
+
+
+ return 0;
+}
+
+static void wacom_wac_finger_mt_report(struct wacom_wac *wacom_wac,
+ struct input_dev *input, bool touch)
+{
+ int slot;
+ struct hid_data *hid_data = &wacom_wac->hid_data;
+
+ slot = input_mt_get_slot_by_key(input, hid_data->id);
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+ if (touch) {
+ input_report_abs(input, ABS_MT_POSITION_X, hid_data->x);
+ input_report_abs(input, ABS_MT_POSITION_Y, hid_data->y);
+ }
+ input_mt_sync_frame(input);
+}
+
+static void wacom_wac_finger_single_touch_report(struct wacom_wac *wacom_wac,
+ struct input_dev *input, bool touch)
+{
+ struct hid_data *hid_data = &wacom_wac->hid_data;
+
+ if (touch) {
+ input_report_abs(input, ABS_X, hid_data->x);
+ input_report_abs(input, ABS_Y, hid_data->y);
+ }
+ input_report_key(input, BTN_TOUCH, touch);
+}
+
+static void wacom_wac_finger_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->input;
+ bool touch = wacom_wac->hid_data.tipswitch &&
+ !wacom_wac->shared->stylus_in_proximity;
+ unsigned touch_max = wacom_wac->features.touch_max;
+
+ if (touch_max > 1)
+ wacom_wac_finger_mt_report(wacom_wac, input, touch);
+ else
+ wacom_wac_finger_single_touch_report(wacom_wac, input, touch);
+ input_sync(input);
+
+ /* keep touch state for pen event */
+ wacom_wac->shared->touch_down = touch;
+}
+
+#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
+ ((f)->physical == HID_DG_STYLUS))
+#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
+ ((f)->physical == HID_DG_FINGER))
+
+void wacom_wac_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->input;
+
+ /* currently, only direct devices have proper hid report descriptors */
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+
+ if (WACOM_PEN_FIELD(field))
+ return wacom_wac_pen_usage_mapping(hdev, field, usage);
+
+ if (WACOM_FINGER_FIELD(field))
+ return wacom_wac_finger_usage_mapping(hdev, field, usage);
+}
+
+int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+
+ if (wacom->wacom_wac.features.type != HID_GENERIC)
+ return 0;
+
+ if (WACOM_PEN_FIELD(field))
+ return wacom_wac_pen_event(hdev, field, usage, value);
+
+ if (WACOM_FINGER_FIELD(field))
+ return wacom_wac_finger_event(hdev, field, usage, value);
+
+ return 0;
+}
+
+void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct hid_field *field = report->field[0];
+
+ if (wacom_wac->features.type != HID_GENERIC)
+ return;
+
+ if (WACOM_PEN_FIELD(field))
+ return wacom_wac_pen_report(hdev, report);
+
+ if (WACOM_FINGER_FIELD(field))
+ return wacom_wac_finger_report(hdev, report);
+}
+
static int wacom_bpt_touch(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
@@ -1746,6 +2036,10 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ if (features->type == HID_GENERIC)
+ /* setup has already been done */
+ return 0;
+
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
@@ -1990,6 +2284,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+ /* kept for making udev and libwacom accept the pad */
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+
switch (features->type) {
case GRAPHIRE_BT:
__set_bit(BTN_0, input_dev->keybit);
@@ -2573,6 +2870,17 @@ static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x30A =
+ { "Wacom ISDv5 30A", 59352, 33648, 2047, 63,
+ CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
+static const struct wacom_features wacom_features_0x30C =
+ { "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30A, .touch_max = 10,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+
+static const struct wacom_features wacom_features_HID_ANY_ID =
+ { "Wacom HID", .type = HID_GENERIC };
#define USB_DEVICE_WACOM(prod) \
HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
@@ -2708,6 +3016,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x304) },
{ USB_DEVICE_WACOM(0x307) },
{ USB_DEVICE_WACOM(0x309) },
+ { USB_DEVICE_WACOM(0x30A) },
+ { USB_DEVICE_WACOM(0x30C) },
{ USB_DEVICE_WACOM(0x30E) },
{ USB_DEVICE_WACOM(0x314) },
{ USB_DEVICE_WACOM(0x315) },
@@ -2716,6 +3026,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
{ USB_DEVICE_WACOM(0x5002) },
+
+ { USB_DEVICE_WACOM(HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, wacom_ids);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 339ab5d81a2d..0f0b85ec1322 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -113,6 +113,7 @@ enum {
MTSCREEN,
MTTPC,
MTTPC_B,
+ HID_GENERIC,
MAX_TYPE
};
@@ -154,6 +155,20 @@ struct wacom_shared {
struct input_dev *touch_input;
};
+struct hid_data {
+ __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __s16 inputmode_index; /* InputMode HID feature index in the report */
+ bool inrange_state;
+ bool invert_state;
+ bool tipswitch;
+ int x;
+ int y;
+ int pressure;
+ int width;
+ int height;
+ int id;
+};
+
struct wacom_wac {
char name[WACOM_NAME_MAX];
char pad_name[WACOM_NAME_MAX];
@@ -167,6 +182,7 @@ struct wacom_wac {
struct wacom_shared *shared;
struct input_dev *input;
struct input_dev *pad_input;
+ bool input_registered;
int pid;
int battery_capacity;
int num_contacts_left;
@@ -174,6 +190,7 @@ struct wacom_wac {
int ps_connected;
u8 bt_features;
u8 bt_high_speed;
+ struct hid_data hid_data;
};
#endif
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index e0228b228256..1722f50f2473 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -2,11 +2,8 @@
# Makefile for the i2c core.
#
-i2ccore-y := i2c-core.o
-i2ccore-$(CONFIG_ACPI) += i2c-acpi.o
-
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
-obj-$(CONFIG_I2C) += i2ccore.o
+obj-$(CONFIG_I2C) += i2c-core.o
obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 2ac87fa3058d..2e45ae3796f1 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -357,7 +357,7 @@ config I2C_BCM_KONA
If you say yes to this option, support will be included for the
I2C interface on the Broadcom Kona family of processors.
- If you do not need KONA I2C inteface, say N.
+ If you do not need KONA I2C interface, say N.
config I2C_BLACKFIN_TWI
tristate "Blackfin TWI I2C support"
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 984492553e95..d9ee43c80cde 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
desc->wr_len_cmd = dma_size;
desc->control |= ISMT_DESC_BLK;
priv->dma_buffer[0] = command;
- memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+ memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
} else {
/* Block Read */
dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n");
@@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
desc->wr_len_cmd = dma_size;
desc->control |= ISMT_DESC_I2C;
priv->dma_buffer[0] = command;
- memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+ memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
} else {
/* i2c Block Read */
dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n");
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7170fc892829..65a21fed08b5 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -429,7 +429,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
ret = mxs_i2c_pio_wait_xfer_end(i2c);
if (ret) {
dev_err(i2c->dev,
- "PIO: Failed to send SELECT command!\n");
+ "PIO: Failed to send READ command!\n");
goto cleanup;
}
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 3a4d64e1dfb1..092d89bd3224 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -674,16 +674,20 @@ static int qup_i2c_probe(struct platform_device *pdev)
qup->adap.dev.of_node = pdev->dev.of_node;
strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
- ret = i2c_add_adapter(&qup->adap);
- if (ret)
- goto fail;
-
pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(qup->dev);
pm_runtime_set_active(qup->dev);
pm_runtime_enable(qup->dev);
+
+ ret = i2c_add_adapter(&qup->adap);
+ if (ret)
+ goto fail_runtime;
+
return 0;
+fail_runtime:
+ pm_runtime_disable(qup->dev);
+ pm_runtime_set_suspended(qup->dev);
fail:
qup_i2c_disable_clocks(qup);
return ret;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 1cc146cfc1f3..e506fcd3ca04 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -76,8 +76,8 @@
#define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR)
#define RCAR_IRQ_STOP (MST)
-#define RCAR_IRQ_ACK_SEND (~(MAT | MDE))
-#define RCAR_IRQ_ACK_RECV (~(MAT | MDR))
+#define RCAR_IRQ_ACK_SEND (~(MAT | MDE) & 0xFF)
+#define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0xFF)
#define ID_LAST_MSG (1 << 0)
#define ID_IOERROR (1 << 1)
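These acknowledge masks are written to a hardware status register, and ~ applied to an int yields a 32-bit value with every upper bit set; the added & 0xFF confines the value to the low byte, presumably so that only the intended status bits are touched. A small illustration, with made-up bit positions rather than the real MAT/MDE values:

    /* hypothetical bit positions, for illustration only */
    #define MAT (1 << 2)
    #define MDE (1 << 5)

    unsigned int ack  = ~(MAT | MDE);         /* 0xffffffdb - upper 24 bits set too */
    unsigned int ack8 = ~(MAT | MDE) & 0xFF;  /* 0xdb - confined to the 8-bit field */
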
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index e637c32ae517..b38b0529946a 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -238,7 +238,7 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
for (i = 0; i < 8; ++i) {
val = 0;
for (j = 0; j < 4; ++j) {
- if (i2c->processed == i2c->msg->len)
+ if ((i2c->processed == i2c->msg->len) && (cnt != 0))
break;
if (i2c->processed == 0 && cnt == 0)
@@ -433,12 +433,11 @@ static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
unsigned long i2c_rate = clk_get_rate(i2c->clk);
unsigned int div;
- /* SCL rate = (clk rate) / (8 * DIV) */
- div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);
-
- /* The lower and upper half of the CLKDIV reg describe the length of
- * SCL low & high periods. */
- div = DIV_ROUND_UP(div, 2);
+ /* set DIV = DIVH = DIVL
+ * SCL rate = (clk rate) / (8 * (DIVH + 1 + DIVL + 1))
+ * = (clk rate) / (16 * (DIV + 1))
+ */
+ div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1;
i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
}
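A quick worked example of the new divider formula, using an illustrative (not board-specific) input clock: for i2c_rate = 100 MHz and a requested scl_rate of 100 kHz,

    div = DIV_ROUND_UP(100000000, 100000 * 16) - 1;   /* = 63 - 1 = 62 */
    /* resulting SCL = 100000000 / (16 * (62 + 1)) ~= 99.2 kHz */

so the rounding keeps the actual SCL frequency at or below the requested rate.
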
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index e086fb075f2b..e3b0337faeb7 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -244,7 +244,7 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
}
usleep_range(1000, 2000);
}
- dev_err(i2c->dev, "ack was not recieved\n");
+ dev_err(i2c->dev, "ack was not received\n");
return false;
}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 87d0371cebb7..efba1ebe16ba 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -380,34 +380,33 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
{
int ret;
if (!i2c_dev->hw->has_single_clk_source) {
- ret = clk_prepare_enable(i2c_dev->fast_clk);
+ ret = clk_enable(i2c_dev->fast_clk);
if (ret < 0) {
dev_err(i2c_dev->dev,
"Enabling fast clk failed, err %d\n", ret);
return ret;
}
}
- ret = clk_prepare_enable(i2c_dev->div_clk);
+ ret = clk_enable(i2c_dev->div_clk);
if (ret < 0) {
dev_err(i2c_dev->dev,
"Enabling div clk failed, err %d\n", ret);
- clk_disable_unprepare(i2c_dev->fast_clk);
+ clk_disable(i2c_dev->fast_clk);
}
return ret;
}
static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev)
{
- clk_disable_unprepare(i2c_dev->div_clk);
+ clk_disable(i2c_dev->div_clk);
if (!i2c_dev->hw->has_single_clk_source)
- clk_disable_unprepare(i2c_dev->fast_clk);
+ clk_disable(i2c_dev->fast_clk);
}
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val;
int err = 0;
- int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
u32 clk_divisor;
err = tegra_i2c_clock_enable(i2c_dev);
@@ -428,9 +427,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
i2c_writel(i2c_dev, val, I2C_CNFG);
i2c_writel(i2c_dev, 0, I2C_INT_MASK);
- clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
- clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier);
-
/* Make sure clock divisor programmed correctly */
clk_divisor = i2c_dev->hw->clk_divisor_hs_mode;
clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode <<
@@ -712,6 +708,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
void __iomem *base;
int irq;
int ret = 0;
+ int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -777,17 +774,39 @@ static int tegra_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c_dev);
+ if (!i2c_dev->hw->has_single_clk_source) {
+ ret = clk_prepare(i2c_dev->fast_clk);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
+ ret = clk_set_rate(i2c_dev->div_clk,
+ i2c_dev->bus_clk_rate * clk_multiplier);
+ if (ret) {
+ dev_err(i2c_dev->dev, "Clock rate change failed %d\n", ret);
+ goto unprepare_fast_clk;
+ }
+
+ ret = clk_prepare(i2c_dev->div_clk);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+ goto unprepare_fast_clk;
+ }
+
ret = tegra_i2c_init(i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize i2c controller");
- return ret;
+ goto unprepare_div_clk;
}
ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
- return ret;
+ goto unprepare_div_clk;
}
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
@@ -803,16 +822,30 @@ static int tegra_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
if (ret) {
dev_err(&pdev->dev, "Failed to add I2C adapter\n");
- return ret;
+ goto unprepare_div_clk;
}
return 0;
+
+unprepare_div_clk:
+ clk_unprepare(i2c_dev->div_clk);
+
+unprepare_fast_clk:
+ if (!i2c_dev->hw->has_single_clk_source)
+ clk_unprepare(i2c_dev->fast_clk);
+
+ return ret;
}
static int tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
+
+ clk_unprepare(i2c_dev->div_clk);
+ if (!i2c_dev->hw->has_single_clk_source)
+ clk_unprepare(i2c_dev->fast_clk);
+
return 0;
}
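The probe/remove rework above follows the usual split in the common clock API: clk_prepare()/clk_unprepare() may sleep and are therefore done once in probe and remove, while the non-sleeping clk_enable()/clk_disable() pair stays in tegra_i2c_clock_enable()/_disable() on the transfer path. A minimal sketch of that pattern, with error handling trimmed:

    ret = clk_prepare(clk);      /* probe: slow, may sleep */
    if (ret)
            return ret;

    clk_enable(clk);             /* around each transfer: fast, atomic-safe */
    /* ... program the controller, do the I/O ... */
    clk_disable(clk);

    clk_unprepare(clk);          /* remove: undo the prepare */
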
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c
deleted file mode 100644
index 0dbc18c15c43..000000000000
--- a/drivers/i2c/i2c-acpi.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * I2C ACPI code
- *
- * Copyright (C) 2014 Intel Corp
- *
- * Author: Lan Tianyu <tianyu.lan@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-#define pr_fmt(fmt) "I2C/ACPI : " fmt
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
-
-struct acpi_i2c_handler_data {
- struct acpi_connection_info info;
- struct i2c_adapter *adapter;
-};
-
-struct gsb_buffer {
- u8 status;
- u8 len;
- union {
- u16 wdata;
- u8 bdata;
- u8 data[0];
- };
-} __packed;
-
-static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
-{
- struct i2c_board_info *info = data;
-
- if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- struct acpi_resource_i2c_serialbus *sb;
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
- info->addr = sb->slave_address;
- if (sb->access_mode == ACPI_I2C_10BIT_MODE)
- info->flags |= I2C_CLIENT_TEN;
- }
- } else if (info->irq < 0) {
- struct resource r;
-
- if (acpi_dev_resource_interrupt(ares, 0, &r))
- info->irq = r.start;
- }
-
- /* Tell the ACPI core to skip this resource */
- return 1;
-}
-
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
-{
- struct i2c_adapter *adapter = data;
- struct list_head resource_list;
- struct i2c_board_info info;
- struct acpi_device *adev;
- int ret;
-
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
- return AE_OK;
-
- memset(&info, 0, sizeof(info));
- info.acpi_node.companion = adev;
- info.irq = -1;
-
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_add_resource, &info);
- acpi_dev_free_resource_list(&resource_list);
-
- if (ret < 0 || !info.addr)
- return AE_OK;
-
- adev->power.flags.ignore_parent = true;
- strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
- if (!i2c_new_device(adapter, &info)) {
- adev->power.flags.ignore_parent = false;
- dev_err(&adapter->dev,
- "failed to add I2C device %s from ACPI\n",
- dev_name(&adev->dev));
- }
-
- return AE_OK;
-}
-
-/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
- * @adap: pointer to adapter
- *
- * Enumerate all I2C slave devices behind this adapter by walking the ACPI
- * namespace. When a device is found it will be added to the Linux device
- * model and bound to the corresponding ACPI handle.
- */
-void acpi_i2c_register_devices(struct i2c_adapter *adap)
-{
- acpi_handle handle;
- acpi_status status;
-
- if (!adap->dev.parent)
- return;
-
- handle = ACPI_HANDLE(adap->dev.parent);
- if (!handle)
- return;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- acpi_i2c_add_device, NULL,
- adap, NULL);
- if (ACPI_FAILURE(status))
- dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
-}
-
-#ifdef CONFIG_ACPI_I2C_OPREGION
-static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
- u8 cmd, u8 *data, u8 data_len)
-{
-
- struct i2c_msg msgs[2];
- int ret;
- u8 *buffer;
-
- buffer = kzalloc(data_len, GFP_KERNEL);
- if (!buffer)
- return AE_NO_MEMORY;
-
- msgs[0].addr = client->addr;
- msgs[0].flags = client->flags;
- msgs[0].len = 1;
- msgs[0].buf = &cmd;
-
- msgs[1].addr = client->addr;
- msgs[1].flags = client->flags | I2C_M_RD;
- msgs[1].len = data_len;
- msgs[1].buf = buffer;
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- dev_err(&client->adapter->dev, "i2c read failed\n");
- else
- memcpy(data, buffer, data_len);
-
- kfree(buffer);
- return ret;
-}
-
-static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
- u8 cmd, u8 *data, u8 data_len)
-{
-
- struct i2c_msg msgs[1];
- u8 *buffer;
- int ret = AE_OK;
-
- buffer = kzalloc(data_len + 1, GFP_KERNEL);
- if (!buffer)
- return AE_NO_MEMORY;
-
- buffer[0] = cmd;
- memcpy(buffer + 1, data, data_len);
-
- msgs[0].addr = client->addr;
- msgs[0].flags = client->flags;
- msgs[0].len = data_len + 1;
- msgs[0].buf = buffer;
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- dev_err(&client->adapter->dev, "i2c write failed\n");
-
- kfree(buffer);
- return ret;
-}
-
-static acpi_status
-acpi_i2c_space_handler(u32 function, acpi_physical_address command,
- u32 bits, u64 *value64,
- void *handler_context, void *region_context)
-{
- struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
- struct acpi_i2c_handler_data *data = handler_context;
- struct acpi_connection_info *info = &data->info;
- struct acpi_resource_i2c_serialbus *sb;
- struct i2c_adapter *adapter = data->adapter;
- struct i2c_client client;
- struct acpi_resource *ares;
- u32 accessor_type = function >> 16;
- u8 action = function & ACPI_IO_MASK;
- acpi_status ret = AE_OK;
- int status;
-
- ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
- if (ACPI_FAILURE(ret))
- return ret;
-
- if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- ret = AE_BAD_PARAMETER;
- goto err;
- }
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
- ret = AE_BAD_PARAMETER;
- goto err;
- }
-
- memset(&client, 0, sizeof(client));
- client.adapter = adapter;
- client.addr = sb->slave_address;
- client.flags = 0;
-
- if (sb->access_mode == ACPI_I2C_10BIT_MODE)
- client.flags |= I2C_CLIENT_TEN;
-
- switch (accessor_type) {
- case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV:
- if (action == ACPI_READ) {
- status = i2c_smbus_read_byte(&client);
- if (status >= 0) {
- gsb->bdata = status;
- status = 0;
- }
- } else {
- status = i2c_smbus_write_byte(&client, gsb->bdata);
- }
- break;
-
- case ACPI_GSB_ACCESS_ATTRIB_BYTE:
- if (action == ACPI_READ) {
- status = i2c_smbus_read_byte_data(&client, command);
- if (status >= 0) {
- gsb->bdata = status;
- status = 0;
- }
- } else {
- status = i2c_smbus_write_byte_data(&client, command,
- gsb->bdata);
- }
- break;
-
- case ACPI_GSB_ACCESS_ATTRIB_WORD:
- if (action == ACPI_READ) {
- status = i2c_smbus_read_word_data(&client, command);
- if (status >= 0) {
- gsb->wdata = status;
- status = 0;
- }
- } else {
- status = i2c_smbus_write_word_data(&client, command,
- gsb->wdata);
- }
- break;
-
- case ACPI_GSB_ACCESS_ATTRIB_BLOCK:
- if (action == ACPI_READ) {
- status = i2c_smbus_read_block_data(&client, command,
- gsb->data);
- if (status >= 0) {
- gsb->len = status;
- status = 0;
- }
- } else {
- status = i2c_smbus_write_block_data(&client, command,
- gsb->len, gsb->data);
- }
- break;
-
- case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE:
- if (action == ACPI_READ) {
- status = acpi_gsb_i2c_read_bytes(&client, command,
- gsb->data, info->access_length);
- if (status > 0)
- status = 0;
- } else {
- status = acpi_gsb_i2c_write_bytes(&client, command,
- gsb->data, info->access_length);
- }
- break;
-
- default:
- pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
- ret = AE_BAD_PARAMETER;
- goto err;
- }
-
- gsb->status = status;
-
- err:
- ACPI_FREE(ares);
- return ret;
-}
-
-
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
-{
- acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
- struct acpi_i2c_handler_data *data;
- acpi_status status;
-
- if (!handle)
- return -ENODEV;
-
- data = kzalloc(sizeof(struct acpi_i2c_handler_data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->adapter = adapter;
- status = acpi_bus_attach_private_data(handle, (void *)data);
- if (ACPI_FAILURE(status)) {
- kfree(data);
- return -ENOMEM;
- }
-
- status = acpi_install_address_space_handler(handle,
- ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler,
- NULL,
- data);
- if (ACPI_FAILURE(status)) {
- dev_err(&adapter->dev, "Error installing i2c space handler\n");
- acpi_bus_detach_private_data(handle);
- kfree(data);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
-{
- acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
- struct acpi_i2c_handler_data *data;
- acpi_status status;
-
- if (!handle)
- return;
-
- acpi_remove_address_space_handler(handle,
- ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler);
-
- status = acpi_bus_get_private_data(handle, (void **)&data);
- if (ACPI_SUCCESS(status))
- kfree(data);
-
- acpi_bus_detach_private_data(handle);
-}
-#endif
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 632057a44615..ccfbbab82a15 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -27,6 +27,8 @@
OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
(based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and
(c) 2013 Wolfram Sang <wsa@the-dreams.de>
+ I2C ACPI code Copyright (C) 2014 Intel Corp
+ Author: Lan Tianyu <tianyu.lan@intel.com>
*/
#include <linux/module.h>
@@ -78,6 +80,368 @@ void i2c_transfer_trace_unreg(void)
static_key_slow_dec(&i2c_trace_msg);
}
+#if defined(CONFIG_ACPI)
+struct acpi_i2c_handler_data {
+ struct acpi_connection_info info;
+ struct i2c_adapter *adapter;
+};
+
+struct gsb_buffer {
+ u8 status;
+ u8 len;
+ union {
+ u16 wdata;
+ u8 bdata;
+ u8 data[0];
+ };
+} __packed;
+
+static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct i2c_board_info *info = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_i2c_serialbus *sb;
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ info->addr = sb->slave_address;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
+ }
+ } else if (info->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ info->irq = r.start;
+ }
+
+ /* Tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct list_head resource_list;
+ struct i2c_board_info info;
+ struct acpi_device *adev;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ memset(&info, 0, sizeof(info));
+ info.acpi_node.companion = adev;
+ info.irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_i2c_add_resource, &info);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !info.addr)
+ return AE_OK;
+
+ adev->power.flags.ignore_parent = true;
+ strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
+ if (!i2c_new_device(adapter, &info)) {
+ adev->power.flags.ignore_parent = false;
+ dev_err(&adapter->dev,
+ "failed to add I2C device %s from ACPI\n",
+ dev_name(&adev->dev));
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * @adap: pointer to adapter
+ *
+ * Enumerate all I2C slave devices behind this adapter by walking the ACPI
+ * namespace. When a device is found it will be added to the Linux device
+ * model and bound to the corresponding ACPI handle.
+ */
+static void acpi_i2c_register_devices(struct i2c_adapter *adap)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!adap->dev.parent)
+ return;
+
+ handle = ACPI_HANDLE(adap->dev.parent);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_i2c_add_device, NULL,
+ adap, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+}
+
+#else /* CONFIG_ACPI */
+static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_I2C_OPREGION
+static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
+ u8 cmd, u8 *data, u8 data_len)
+{
+
+ struct i2c_msg msgs[2];
+ int ret;
+ u8 *buffer;
+
+ buffer = kzalloc(data_len, GFP_KERNEL);
+ if (!buffer)
+ return AE_NO_MEMORY;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags;
+ msgs[0].len = 1;
+ msgs[0].buf = &cmd;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags | I2C_M_RD;
+ msgs[1].len = data_len;
+ msgs[1].buf = buffer;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ dev_err(&client->adapter->dev, "i2c read failed\n");
+ else
+ memcpy(data, buffer, data_len);
+
+ kfree(buffer);
+ return ret;
+}
+
+static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+ u8 cmd, u8 *data, u8 data_len)
+{
+
+ struct i2c_msg msgs[1];
+ u8 *buffer;
+ int ret = AE_OK;
+
+ buffer = kzalloc(data_len + 1, GFP_KERNEL);
+ if (!buffer)
+ return AE_NO_MEMORY;
+
+ buffer[0] = cmd;
+ memcpy(buffer + 1, data, data_len);
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags;
+ msgs[0].len = data_len + 1;
+ msgs[0].buf = buffer;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ dev_err(&client->adapter->dev, "i2c write failed\n");
+
+ kfree(buffer);
+ return ret;
+}
+
+static acpi_status
+acpi_i2c_space_handler(u32 function, acpi_physical_address command,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
+ struct acpi_i2c_handler_data *data = handler_context;
+ struct acpi_connection_info *info = &data->info;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_adapter *adapter = data->adapter;
+ struct i2c_client client;
+ struct acpi_resource *ares;
+ u32 accessor_type = function >> 16;
+ u8 action = function & ACPI_IO_MASK;
+ acpi_status ret = AE_OK;
+ int status;
+
+ ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
+ if (ACPI_FAILURE(ret))
+ return ret;
+
+ if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ memset(&client, 0, sizeof(client));
+ client.adapter = adapter;
+ client.addr = sb->slave_address;
+ client.flags = 0;
+
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ client.flags |= I2C_CLIENT_TEN;
+
+ switch (accessor_type) {
+ case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_byte(&client);
+ if (status >= 0) {
+ gsb->bdata = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_byte(&client, gsb->bdata);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_BYTE:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_byte_data(&client, command);
+ if (status >= 0) {
+ gsb->bdata = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_byte_data(&client, command,
+ gsb->bdata);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_WORD:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_word_data(&client, command);
+ if (status >= 0) {
+ gsb->wdata = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_word_data(&client, command,
+ gsb->wdata);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_BLOCK:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_block_data(&client, command,
+ gsb->data);
+ if (status >= 0) {
+ gsb->len = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_block_data(&client, command,
+ gsb->len, gsb->data);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE:
+ if (action == ACPI_READ) {
+ status = acpi_gsb_i2c_read_bytes(&client, command,
+ gsb->data, info->access_length);
+ if (status > 0)
+ status = 0;
+ } else {
+ status = acpi_gsb_i2c_write_bytes(&client, command,
+ gsb->data, info->access_length);
+ }
+ break;
+
+ default:
+ pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ gsb->status = status;
+
+ err:
+ ACPI_FREE(ares);
+ return ret;
+}
+
+
+static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+{
+ acpi_handle handle;
+ struct acpi_i2c_handler_data *data;
+ acpi_status status;
+
+ if (!adapter->dev.parent)
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(adapter->dev.parent);
+
+ if (!handle)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct acpi_i2c_handler_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->adapter = adapter;
+ status = acpi_bus_attach_private_data(handle, (void *)data);
+ if (ACPI_FAILURE(status)) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ status = acpi_install_address_space_handler(handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &acpi_i2c_space_handler,
+ NULL,
+ data);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&adapter->dev, "Error installing i2c space handler\n");
+ acpi_bus_detach_private_data(handle);
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+{
+ acpi_handle handle;
+ struct acpi_i2c_handler_data *data;
+ acpi_status status;
+
+ if (!adapter->dev.parent)
+ return;
+
+ handle = ACPI_HANDLE(adapter->dev.parent);
+
+ if (!handle)
+ return;
+
+ acpi_remove_address_space_handler(handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &acpi_i2c_space_handler);
+
+ status = acpi_bus_get_private_data(handle, (void **)&data);
+ if (ACPI_SUCCESS(status))
+ kfree(data);
+
+ acpi_bus_detach_private_data(handle);
+}
+#else /* CONFIG_ACPI_I2C_OPREGION */
+static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+{ }
+
+static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+{ return 0; }
+#endif /* CONFIG_ACPI_I2C_OPREGION */
+
/* ------------------------------------------------------------------------- */
static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 8f5f2577f288..e8b8569788c0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1870,7 +1870,7 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
if (unlikely(hdr->total_len > qp->max_inline_data)) {
pr_err("%s() supported_len=0x%x,\n"
- " unspported len req=0x%x\n", __func__,
+ " unsupported len req=0x%x\n", __func__,
qp->max_inline_data, hdr->total_len);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index f8dfd76be89f..db3588df3546 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -511,7 +511,7 @@ int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
usnic_ib_qp_grp_state_to_string(old_state),
usnic_ib_qp_grp_state_to_string(new_state));
} else {
- usnic_err("Failed to transistion %u from %s to %s",
+ usnic_err("Failed to transition %u from %s to %s",
qp_grp->grp_id,
usnic_ib_qp_grp_state_to_string(old_state),
usnic_ib_qp_grp_state_to_string(new_state));
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 24c41ba7d4e0..e29c04e2aff4 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -23,6 +23,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h> /* HZ */
#include <linux/mutex.h>
+#include <linux/timekeeping.h>
/*#include <asm/io.h>*/
@@ -30,6 +31,10 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Generic gameport layer");
MODULE_LICENSE("GPL");
+static bool use_ktime = true;
+module_param(use_ktime, bool, 0400);
+MODULE_PARM_DESC(use_ktime, "Use ktime for measuring I/O speed");
+
/*
* gameport_mutex protects entire gameport subsystem and is taken
* every time gameport port or driver registered or unregistered.
@@ -76,6 +81,38 @@ static unsigned int get_time_pit(void)
static int gameport_measure_speed(struct gameport *gameport)
{
+ unsigned int i, t, tx;
+ u64 t1, t2, t3;
+ unsigned long flags;
+
+ if (gameport_open(gameport, NULL, GAMEPORT_MODE_RAW))
+ return 0;
+
+ tx = ~0;
+
+ for (i = 0; i < 50; i++) {
+ local_irq_save(flags);
+ t1 = ktime_get_ns();
+ for (t = 0; t < 50; t++)
+ gameport_read(gameport);
+ t2 = ktime_get_ns();
+ t3 = ktime_get_ns();
+ local_irq_restore(flags);
+ udelay(i * 10);
+ t = (t2 - t1) - (t3 - t2);
+ if (t < tx)
+ tx = t;
+ }
+
+ gameport_close(gameport);
+ t = 1000000 * 50;
+ if (tx)
+ t /= tx;
+ return t;
+}
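With tx being the best-case time in nanoseconds for 50 reads (net of the ktime_get_ns() overhead), the final t = 1000000 * 50 / tx works out to reads per millisecond, which appears to be the same kHz-style figure the old PIT-based routine produced. For example, with illustrative numbers:

    /* 50 reads measured at tx = 40000 ns of net port I/O time */
    t = 1000000 * 50;    /* 50000000 */
    t /= 40000;          /* = 1250 -> roughly 1250 reads per millisecond */
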
+
+static int old_gameport_measure_speed(struct gameport *gameport)
+{
#if defined(__i386__)
unsigned int i, t, t1, t2, t3, tx;
@@ -521,7 +558,9 @@ static void gameport_add_port(struct gameport *gameport)
if (gameport->parent)
gameport->parent->child = gameport;
- gameport->speed = gameport_measure_speed(gameport);
+ gameport->speed = use_ktime ?
+ gameport_measure_speed(gameport) :
+ old_gameport_measure_speed(gameport);
list_add_tail(&gameport->node, &gameport_list);
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index ab0fdcd36e18..4284080e481d 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -36,6 +36,7 @@
#include <linux/gameport.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
+#include <linux/timekeeping.h>
#define DRIVER_DESC "Analog joystick and gamepad driver"
@@ -43,6 +44,10 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+static bool use_ktime = true;
+module_param(use_ktime, bool, 0400);
+MODULE_PARM_DESC(use_ktime, "Use ktime for measuring I/O speed");
+
/*
* Option parsing.
*/
@@ -171,6 +176,25 @@ static unsigned long analog_faketime = 0;
#warning Precise timer not defined for this architecture.
#endif
+static inline u64 get_time(void)
+{
+ if (use_ktime) {
+ return ktime_get_ns();
+ } else {
+ unsigned int x;
+ GET_TIME(x);
+ return x;
+ }
+}
+
+static inline unsigned int delta(u64 x, u64 y)
+{
+ if (use_ktime)
+ return y - x;
+ else
+ return DELTA((unsigned int)x, (unsigned int)y);
+}
+
/*
* analog_decode() decodes analog joystick data and reports input events.
*/
@@ -226,7 +250,8 @@ static void analog_decode(struct analog *analog, int *axes, int *initial, int bu
static int analog_cooked_read(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
- unsigned int time[4], start, loop, now, loopout, timeout;
+ u64 time[4], start, loop, now;
+ unsigned int loopout, timeout;
unsigned char data[4], this, last;
unsigned long flags;
int i, j;
@@ -236,7 +261,7 @@ static int analog_cooked_read(struct analog_port *port)
local_irq_save(flags);
gameport_trigger(gameport);
- GET_TIME(now);
+ now = get_time();
local_irq_restore(flags);
start = now;
@@ -249,16 +274,16 @@ static int analog_cooked_read(struct analog_port *port)
local_irq_disable();
this = gameport_read(gameport) & port->mask;
- GET_TIME(now);
+ now = get_time();
local_irq_restore(flags);
- if ((last ^ this) && (DELTA(loop, now) < loopout)) {
+ if ((last ^ this) && (delta(loop, now) < loopout)) {
data[i] = last ^ this;
time[i] = now;
i++;
}
- } while (this && (i < 4) && (DELTA(start, now) < timeout));
+ } while (this && (i < 4) && (delta(start, now) < timeout));
this <<= 4;
@@ -266,7 +291,7 @@ static int analog_cooked_read(struct analog_port *port)
this |= data[i];
for (j = 0; j < 4; j++)
if (data[i] & (1 << j))
- port->axes[j] = (DELTA(start, time[i]) << ANALOG_FUZZ_BITS) / port->loop;
+ port->axes[j] = (delta(start, time[i]) << ANALOG_FUZZ_BITS) / port->loop;
}
return -(this != port->mask);
@@ -365,31 +390,39 @@ static void analog_close(struct input_dev *dev)
static void analog_calibrate_timer(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
- unsigned int i, t, tx, t1, t2, t3;
+ unsigned int i, t, tx;
+ u64 t1, t2, t3;
unsigned long flags;
- local_irq_save(flags);
- GET_TIME(t1);
+ if (use_ktime) {
+ port->speed = 1000000;
+ } else {
+ local_irq_save(flags);
+ t1 = get_time();
#ifdef FAKE_TIME
- analog_faketime += 830;
+ analog_faketime += 830;
#endif
- mdelay(1);
- GET_TIME(t2);
- GET_TIME(t3);
- local_irq_restore(flags);
+ mdelay(1);
+ t2 = get_time();
+ t3 = get_time();
+ local_irq_restore(flags);
- port->speed = DELTA(t1, t2) - DELTA(t2, t3);
+ port->speed = delta(t1, t2) - delta(t2, t3);
+ }
tx = ~0;
for (i = 0; i < 50; i++) {
local_irq_save(flags);
- GET_TIME(t1);
- for (t = 0; t < 50; t++) { gameport_read(gameport); GET_TIME(t2); }
- GET_TIME(t3);
+ t1 = get_time();
+ for (t = 0; t < 50; t++) {
+ gameport_read(gameport);
+ t2 = get_time();
+ }
+ t3 = get_time();
local_irq_restore(flags);
udelay(i);
- t = DELTA(t1, t2) - DELTA(t2, t3);
+ t = delta(t1, t2) - delta(t2, t3);
if (t < tx) tx = t;
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 177602cf7079..cd13c82ca0a1 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -126,7 +126,9 @@ static const struct xpad_device {
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
+ { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
@@ -140,10 +142,17 @@ static const struct xpad_device {
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
+ { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
@@ -156,28 +165,50 @@ static const struct xpad_device {
{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
+ { 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX },
+ { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -274,6 +305,9 @@ static struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
+ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
{ }
};
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 791781ade4e7..72d3499bb029 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -22,6 +22,7 @@
*/
#include <linux/module.h>
+#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -38,6 +39,7 @@
* @row_shift: log2 of number of rows, rounded up
* @keymap_data: Matrix keymap data used to convert to keyscan values
* @ghost_filter: true to enable the matrix key-ghosting filter
+ * @valid_keys: bitmap of existing keys for each matrix column
* @old_kb_state: bitmap of keys pressed last scan
* @dev: Device pointer
* @idev: Input device
@@ -49,6 +51,7 @@ struct cros_ec_keyb {
int row_shift;
const struct matrix_keymap_data *keymap_data;
bool ghost_filter;
+ uint8_t *valid_keys;
uint8_t *old_kb_state;
struct device *dev;
@@ -57,39 +60,15 @@ struct cros_ec_keyb {
};
-static bool cros_ec_keyb_row_has_ghosting(struct cros_ec_keyb *ckdev,
- uint8_t *buf, int row)
-{
- int pressed_in_row = 0;
- int row_has_teeth = 0;
- int col, mask;
-
- mask = 1 << row;
- for (col = 0; col < ckdev->cols; col++) {
- if (buf[col] & mask) {
- pressed_in_row++;
- row_has_teeth |= buf[col] & ~mask;
- if (pressed_in_row > 1 && row_has_teeth) {
- /* ghosting */
- dev_dbg(ckdev->dev,
- "ghost found at: r%d c%d, pressed %d, teeth 0x%x\n",
- row, col, pressed_in_row,
- row_has_teeth);
- return true;
- }
- }
- }
-
- return false;
-}
-
/*
* Returns true when there is at least one combination of pressed keys that
* results in ghosting.
*/
static bool cros_ec_keyb_has_ghosting(struct cros_ec_keyb *ckdev, uint8_t *buf)
{
- int row;
+ int col1, col2, buf1, buf2;
+ struct device *dev = ckdev->dev;
+ uint8_t *valid_keys = ckdev->valid_keys;
/*
* Ghosting happens if for any pressed key X there are other keys
@@ -103,27 +82,23 @@ static bool cros_ec_keyb_has_ghosting(struct cros_ec_keyb *ckdev, uint8_t *buf)
*
* In this case only X, Y, and Z are pressed, but g appears to be
* pressed too (see Wikipedia).
- *
- * We can detect ghosting in a single pass (*) over the keyboard state
- * by maintaining two arrays. pressed_in_row counts how many pressed
- * keys we have found in a row. row_has_teeth is true if any of the
- * pressed keys for this row has other pressed keys in its column. If
- * at any point of the scan we find that a row has multiple pressed
- * keys, and at least one of them is at the intersection with a column
- * with multiple pressed keys, we're sure there is ghosting.
- * Conversely, if there is ghosting, we will detect such situation for
- * at least one key during the pass.
- *
- * (*) This looks linear in the number of keys, but it's not. We can
- * cheat because the number of rows is small.
*/
- for (row = 0; row < ckdev->rows; row++)
- if (cros_ec_keyb_row_has_ghosting(ckdev, buf, row))
- return true;
+ for (col1 = 0; col1 < ckdev->cols; col1++) {
+ buf1 = buf[col1] & valid_keys[col1];
+ for (col2 = col1 + 1; col2 < ckdev->cols; col2++) {
+ buf2 = buf[col2] & valid_keys[col2];
+ if (hweight8(buf1 & buf2) > 1) {
+ dev_dbg(dev, "ghost found at: B[%02d]:0x%02x & B[%02d]:0x%02x",
+ col1, buf1, col2, buf2);
+ return true;
+ }
+ }
+ }
return false;
}
+
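A concrete illustration of the pairwise check, with made-up row/column numbers: ghosting requires two columns that share at least two pressed rows, which is exactly the hweight8(buf1 & buf2) > 1 condition.

    /* keys pressed at (row 1, col 3), (row 4, col 3) and (row 1, col 5) */
    buf[3] = 0x12;                     /* rows 1 and 4 */
    buf[5] = 0x02;                     /* row 1 only   */
    /* hweight8(0x12 & 0x02) == 1 -> no ghost reported                    */
    /* if row 4 of column 5 also reads as pressed, buf[5] becomes 0x12:   */
    /* hweight8(0x12 & 0x12) == 2 -> ghosting, the whole scan is rejected */
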
/*
* Compares the new keyboard state to the old one and produces key
* press/release events accordingly. The keyboard state is 13 bytes (one byte
@@ -222,6 +197,30 @@ static void cros_ec_keyb_close(struct input_dev *dev)
free_irq(ec->irq, ckdev);
}
+/*
+ * Walks the keymap and, for each real key, sets the ROW bit in that column's
+ * valid_keys byte. The ghosting logic uses this to ignore NULL or virtual keys.
+ */
+static void cros_ec_keyb_compute_valid_keys(struct cros_ec_keyb *ckdev)
+{
+ int row, col;
+ int row_shift = ckdev->row_shift;
+ unsigned short *keymap = ckdev->idev->keycode;
+ unsigned short code;
+
+ BUG_ON(ckdev->idev->keycodesize != sizeof(*keymap));
+
+ for (col = 0; col < ckdev->cols; col++) {
+ for (row = 0; row < ckdev->rows; row++) {
+ code = keymap[MATRIX_SCAN_CODE(row, col, row_shift)];
+ if (code && (code != KEY_BATTERY))
+ ckdev->valid_keys[col] |= 1 << row;
+ }
+ dev_dbg(ckdev->dev, "valid_keys[%02d] = 0x%02x\n",
+ col, ckdev->valid_keys[col]);
+ }
+}
+
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
@@ -242,6 +241,11 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
&ckdev->cols);
if (err)
return err;
+
+ ckdev->valid_keys = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
+ if (!ckdev->valid_keys)
+ return -ENOMEM;
+
ckdev->old_kb_state = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
if (!ckdev->old_kb_state)
return -ENOMEM;
@@ -285,6 +289,8 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
input_set_capability(idev, EV_MSC, MSC_SCAN);
input_set_drvdata(idev, ckdev);
ckdev->idev = idev;
+ cros_ec_keyb_compute_valid_keys(ckdev);
+
err = input_register_device(ckdev->idev);
if (err) {
dev_err(dev, "cannot register input device\n");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 2ff4425a893b..23297ab6163f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -144,6 +144,17 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX77693_HAPTIC
+ tristate "MAXIM MAX77693 haptic controller support"
+ depends on MFD_MAX77693 && PWM
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for the haptic controller on
+ MAXIM MAX77693 chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max77693-haptic.
+
config INPUT_MAX8925_ONKEY
tristate "MAX8925 ONKEY support"
depends on MFD_MAX8925
@@ -451,6 +462,16 @@ config HP_SDC_RTC
Say Y here if you want to support the built-in real time clock
of the HP SDC controller.
+config INPUT_PALMAS_PWRBUTTON
+ tristate "Palmas Power button Driver"
+ depends on MFD_PALMAS
+ help
+ Say Y here if you want to enable power key reporting via the
+ Palmas family of PMICs.
+
+ To compile this driver as a module, choose M here. The module will
+ be called palmas_pwrbutton.
+
config INPUT_PCF50633_PMU
tristate "PCF50633 PMU events"
depends on MFD_PCF50633
@@ -676,4 +697,26 @@ config INPUT_SOC_BUTTON_ARRAY
To compile this driver as a module, choose M here: the
module will be called soc_button_array.
+config INPUT_DRV260X_HAPTICS
+ tristate "TI DRV260X haptics support"
+ depends on INPUT && I2C && GPIOLIB
+ select INPUT_FF_MEMLESS
+ select REGMAP_I2C
+ help
+ Say Y to enable support for the TI DRV260X haptics driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called drv260x-haptics.
+
+config INPUT_DRV2667_HAPTICS
+ tristate "TI DRV2667 haptics support"
+ depends on INPUT && I2C
+ select INPUT_FF_MEMLESS
+ select REGMAP_I2C
+ help
+ Say Y to enable support for the TI DRV2667 haptics driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called drv2667-haptics.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 4955ad322a01..19c760361f80 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
+obj-$(CONFIG_INPUT_DRV260X_HAPTICS) += drv260x.o
+obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
@@ -35,11 +37,13 @@ obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
obj-$(CONFIG_INPUT_MPU3050) += mpu3050.o
+obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
new file mode 100644
index 000000000000..cab87f5ce6d3
--- /dev/null
+++ b/drivers/input/misc/drv260x.c
@@ -0,0 +1,741 @@
+/*
+ * DRV260X haptics driver family
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2014 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <dt-bindings/input/ti-drv260x.h>
+#include <linux/platform_data/drv260x-pdata.h>
+
+#define DRV260X_STATUS 0x0
+#define DRV260X_MODE 0x1
+#define DRV260X_RT_PB_IN 0x2
+#define DRV260X_LIB_SEL 0x3
+#define DRV260X_WV_SEQ_1 0x4
+#define DRV260X_WV_SEQ_2 0x5
+#define DRV260X_WV_SEQ_3 0x6
+#define DRV260X_WV_SEQ_4 0x7
+#define DRV260X_WV_SEQ_5 0x8
+#define DRV260X_WV_SEQ_6 0x9
+#define DRV260X_WV_SEQ_7 0xa
+#define DRV260X_WV_SEQ_8 0xb
+#define DRV260X_GO 0xc
+#define DRV260X_OVERDRIVE_OFF 0xd
+#define DRV260X_SUSTAIN_P_OFF 0xe
+#define DRV260X_SUSTAIN_N_OFF 0xf
+#define DRV260X_BRAKE_OFF 0x10
+#define DRV260X_A_TO_V_CTRL 0x11
+#define DRV260X_A_TO_V_MIN_INPUT 0x12
+#define DRV260X_A_TO_V_MAX_INPUT 0x13
+#define DRV260X_A_TO_V_MIN_OUT 0x14
+#define DRV260X_A_TO_V_MAX_OUT 0x15
+#define DRV260X_RATED_VOLT 0x16
+#define DRV260X_OD_CLAMP_VOLT 0x17
+#define DRV260X_CAL_COMP 0x18
+#define DRV260X_CAL_BACK_EMF 0x19
+#define DRV260X_FEEDBACK_CTRL 0x1a
+#define DRV260X_CTRL1 0x1b
+#define DRV260X_CTRL2 0x1c
+#define DRV260X_CTRL3 0x1d
+#define DRV260X_CTRL4 0x1e
+#define DRV260X_CTRL5 0x1f
+#define DRV260X_LRA_LOOP_PERIOD 0x20
+#define DRV260X_VBAT_MON 0x21
+#define DRV260X_LRA_RES_PERIOD 0x22
+#define DRV260X_MAX_REG 0x23
+
+#define DRV260X_GO_BIT 0x01
+
+/* Library Selection */
+#define DRV260X_LIB_SEL_MASK 0x07
+#define DRV260X_LIB_SEL_RAM 0x0
+#define DRV260X_LIB_SEL_OD 0x1
+#define DRV260X_LIB_SEL_40_60 0x2
+#define DRV260X_LIB_SEL_60_80 0x3
+#define DRV260X_LIB_SEL_100_140 0x4
+#define DRV260X_LIB_SEL_140_PLUS 0x5
+
+#define DRV260X_LIB_SEL_HIZ_MASK 0x10
+#define DRV260X_LIB_SEL_HIZ_EN 0x01
+#define DRV260X_LIB_SEL_HIZ_DIS 0
+
+/* Mode register */
+#define DRV260X_STANDBY (1 << 6)
+#define DRV260X_STANDBY_MASK 0x40
+#define DRV260X_INTERNAL_TRIGGER 0x00
+#define DRV260X_EXT_TRIGGER_EDGE 0x01
+#define DRV260X_EXT_TRIGGER_LEVEL 0x02
+#define DRV260X_PWM_ANALOG_IN 0x03
+#define DRV260X_AUDIOHAPTIC 0x04
+#define DRV260X_RT_PLAYBACK 0x05
+#define DRV260X_DIAGNOSTICS 0x06
+#define DRV260X_AUTO_CAL 0x07
+
+/* Audio to Haptics Control */
+#define DRV260X_AUDIO_HAPTICS_PEAK_10MS (0 << 2)
+#define DRV260X_AUDIO_HAPTICS_PEAK_20MS (1 << 2)
+#define DRV260X_AUDIO_HAPTICS_PEAK_30MS (2 << 2)
+#define DRV260X_AUDIO_HAPTICS_PEAK_40MS (3 << 2)
+
+#define DRV260X_AUDIO_HAPTICS_FILTER_100HZ 0x00
+#define DRV260X_AUDIO_HAPTICS_FILTER_125HZ 0x01
+#define DRV260X_AUDIO_HAPTICS_FILTER_150HZ 0x02
+#define DRV260X_AUDIO_HAPTICS_FILTER_200HZ 0x03
+
+/* Min/Max Input/Output Voltages */
+#define DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT 0x19
+#define DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT 0x64
+#define DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT 0x19
+#define DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT 0xFF
+
+/* Feedback register */
+#define DRV260X_FB_REG_ERM_MODE 0x7f
+#define DRV260X_FB_REG_LRA_MODE (1 << 7)
+
+#define DRV260X_BRAKE_FACTOR_MASK 0x1f
+#define DRV260X_BRAKE_FACTOR_2X (1 << 0)
+#define DRV260X_BRAKE_FACTOR_3X (2 << 4)
+#define DRV260X_BRAKE_FACTOR_4X (3 << 4)
+#define DRV260X_BRAKE_FACTOR_6X (4 << 4)
+#define DRV260X_BRAKE_FACTOR_8X (5 << 4)
+#define DRV260X_BRAKE_FACTOR_16 (6 << 4)
+#define DRV260X_BRAKE_FACTOR_DIS (7 << 4)
+
+#define DRV260X_LOOP_GAIN_LOW 0xf3
+#define DRV260X_LOOP_GAIN_MED (1 << 2)
+#define DRV260X_LOOP_GAIN_HIGH (2 << 2)
+#define DRV260X_LOOP_GAIN_VERY_HIGH (3 << 2)
+
+#define DRV260X_BEMF_GAIN_0 0xfc
+#define DRV260X_BEMF_GAIN_1 (1 << 0)
+#define DRV260X_BEMF_GAIN_2 (2 << 0)
+#define DRV260X_BEMF_GAIN_3 (3 << 0)
+
+/* Control 1 register */
+#define DRV260X_AC_CPLE_EN (1 << 5)
+#define DRV260X_STARTUP_BOOST (1 << 7)
+
+/* Control 2 register */
+
+#define DRV260X_IDISS_TIME_45 0
+#define DRV260X_IDISS_TIME_75 (1 << 0)
+#define DRV260X_IDISS_TIME_150 (1 << 1)
+#define DRV260X_IDISS_TIME_225 0x03
+
+#define DRV260X_BLANK_TIME_45 (0 << 2)
+#define DRV260X_BLANK_TIME_75 (1 << 2)
+#define DRV260X_BLANK_TIME_150 (2 << 2)
+#define DRV260X_BLANK_TIME_225 (3 << 2)
+
+#define DRV260X_SAMP_TIME_150 (0 << 4)
+#define DRV260X_SAMP_TIME_200 (1 << 4)
+#define DRV260X_SAMP_TIME_250 (2 << 4)
+#define DRV260X_SAMP_TIME_300 (3 << 4)
+
+#define DRV260X_BRAKE_STABILIZER (1 << 6)
+#define DRV260X_UNIDIR_IN (0 << 7)
+#define DRV260X_BIDIR_IN (1 << 7)
+
+/* Control 3 Register */
+#define DRV260X_LRA_OPEN_LOOP (1 << 0)
+#define DRV260X_ANANLOG_IN (1 << 1)
+#define DRV260X_LRA_DRV_MODE (1 << 2)
+#define DRV260X_RTP_UNSIGNED_DATA (1 << 3)
+#define DRV260X_SUPPLY_COMP_DIS (1 << 4)
+#define DRV260X_ERM_OPEN_LOOP (1 << 5)
+#define DRV260X_NG_THRESH_0 (0 << 6)
+#define DRV260X_NG_THRESH_2 (1 << 6)
+#define DRV260X_NG_THRESH_4 (2 << 6)
+#define DRV260X_NG_THRESH_8 (3 << 6)
+
+/* Control 4 Register */
+#define DRV260X_AUTOCAL_TIME_150MS (0 << 4)
+#define DRV260X_AUTOCAL_TIME_250MS (1 << 4)
+#define DRV260X_AUTOCAL_TIME_500MS (2 << 4)
+#define DRV260X_AUTOCAL_TIME_1000MS (3 << 4)
+
+/**
+ * struct drv260x_data - driver data for a DRV260x haptics device
+ * @input_dev: Pointer to the input device
+ * @client: Pointer to the I2C client
+ * @regmap: Register map of the device
+ * @work: Work item used to offload the enable/disable of the vibration
+ * @enable_gpio: Pointer to the gpio used for enabling/disabling
+ * @regulator: Pointer to the regulator for the IC
+ * @magnitude: Magnitude of the vibration event
+ * @mode: The operating mode of the IC (LRA_NO_CAL, ERM or LRA)
+ * @library: The vibration library to be used
+ * @rated_voltage: The rated voltage of the actuator
+ * @overdrive_voltage: The overdrive voltage of the actuator
+ */
+struct drv260x_data {
+ struct input_dev *input_dev;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct work_struct work;
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ u32 magnitude;
+ u32 mode;
+ u32 library;
+ int rated_voltage;
+ int overdrive_voltage;
+};
+
+static struct reg_default drv260x_reg_defs[] = {
+ { DRV260X_STATUS, 0xe0 },
+ { DRV260X_MODE, 0x40 },
+ { DRV260X_RT_PB_IN, 0x00 },
+ { DRV260X_LIB_SEL, 0x00 },
+ { DRV260X_WV_SEQ_1, 0x01 },
+ { DRV260X_WV_SEQ_2, 0x00 },
+ { DRV260X_WV_SEQ_3, 0x00 },
+ { DRV260X_WV_SEQ_4, 0x00 },
+ { DRV260X_WV_SEQ_5, 0x00 },
+ { DRV260X_WV_SEQ_6, 0x00 },
+ { DRV260X_WV_SEQ_7, 0x00 },
+ { DRV260X_WV_SEQ_8, 0x00 },
+ { DRV260X_GO, 0x00 },
+ { DRV260X_OVERDRIVE_OFF, 0x00 },
+ { DRV260X_SUSTAIN_P_OFF, 0x00 },
+ { DRV260X_SUSTAIN_N_OFF, 0x00 },
+ { DRV260X_BRAKE_OFF, 0x00 },
+ { DRV260X_A_TO_V_CTRL, 0x05 },
+ { DRV260X_A_TO_V_MIN_INPUT, 0x19 },
+ { DRV260X_A_TO_V_MAX_INPUT, 0xff },
+ { DRV260X_A_TO_V_MIN_OUT, 0x19 },
+ { DRV260X_A_TO_V_MAX_OUT, 0xff },
+ { DRV260X_RATED_VOLT, 0x3e },
+ { DRV260X_OD_CLAMP_VOLT, 0x8c },
+ { DRV260X_CAL_COMP, 0x0c },
+ { DRV260X_CAL_BACK_EMF, 0x6c },
+ { DRV260X_FEEDBACK_CTRL, 0x36 },
+ { DRV260X_CTRL1, 0x93 },
+ { DRV260X_CTRL2, 0xfa },
+ { DRV260X_CTRL3, 0xa0 },
+ { DRV260X_CTRL4, 0x20 },
+ { DRV260X_CTRL5, 0x80 },
+ { DRV260X_LRA_LOOP_PERIOD, 0x33 },
+ { DRV260X_VBAT_MON, 0x00 },
+ { DRV260X_LRA_RES_PERIOD, 0x00 },
+};
+
+#define DRV260X_DEF_RATED_VOLT 0x90
+#define DRV260X_DEF_OD_CLAMP_VOLT 0x90
+
+/*
+ * Rated and Overdrive Voltages:
+ * Calculated using the formula r = v * 255 / 5.6
+ * where r is what will be written to the register
+ * and v is the rated or overdrive voltage of the actuator in volts
+ * (the function below takes millivolts, hence the division by 5600).
+ */
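+/*
+ * For example (illustrative numbers only): a 3200 mV rated actuator would be
+ * written to DRV260X_RATED_VOLT as 3200 * 255 / 5600 = 145 (0x91).
+ */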
+static int drv260x_calculate_voltage(unsigned int voltage)
+{
+ return (voltage * 255 / 5600);
+}
+
+static void drv260x_worker(struct work_struct *work)
+{
+ struct drv260x_data *haptics = container_of(work, struct drv260x_data, work);
+ int error;
+
+ gpiod_set_value(haptics->enable_gpio, 1);
+ /* Data sheet says to wait 250us before trying to communicate */
+ udelay(250);
+
+ error = regmap_write(haptics->regmap,
+ DRV260X_MODE, DRV260X_RT_PLAYBACK);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write set mode: %d\n", error);
+ } else {
+ error = regmap_write(haptics->regmap,
+ DRV260X_RT_PB_IN, haptics->magnitude);
+ if (error)
+ dev_err(&haptics->client->dev,
+ "Failed to set magnitude: %d\n", error);
+ }
+}
+
+static int drv260x_haptics_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct drv260x_data *haptics = input_get_drvdata(input);
+
+ haptics->mode = DRV260X_LRA_NO_CAL_MODE;
+
+ if (effect->u.rumble.strong_magnitude > 0)
+ haptics->magnitude = effect->u.rumble.strong_magnitude;
+ else if (effect->u.rumble.weak_magnitude > 0)
+ haptics->magnitude = effect->u.rumble.weak_magnitude;
+ else
+ haptics->magnitude = 0;
+
+ schedule_work(&haptics->work);
+
+ return 0;
+}
+
+static void drv260x_close(struct input_dev *input)
+{
+ struct drv260x_data *haptics = input_get_drvdata(input);
+ int error;
+
+ cancel_work_sync(&haptics->work);
+
+ error = regmap_write(haptics->regmap, DRV260X_MODE, DRV260X_STANDBY);
+ if (error)
+ dev_err(&haptics->client->dev,
+ "Failed to enter standby mode: %d\n", error);
+
+ gpiod_set_value(haptics->enable_gpio, 0);
+}
+
+static const struct reg_default drv260x_lra_cal_regs[] = {
+ { DRV260X_MODE, DRV260X_AUTO_CAL },
+ { DRV260X_CTRL3, DRV260X_NG_THRESH_2 },
+ { DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
+ DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
+};
+
+static const struct reg_default drv260x_lra_init_regs[] = {
+ { DRV260X_MODE, DRV260X_RT_PLAYBACK },
+ { DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS |
+ DRV260X_AUDIO_HAPTICS_FILTER_125HZ },
+ { DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
+ { DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
+ { DRV260X_A_TO_V_MIN_OUT, DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT },
+ { DRV260X_A_TO_V_MAX_OUT, DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT },
+ { DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
+ DRV260X_BRAKE_FACTOR_2X | DRV260X_LOOP_GAIN_MED |
+ DRV260X_BEMF_GAIN_3 },
+ { DRV260X_CTRL1, DRV260X_STARTUP_BOOST },
+ { DRV260X_CTRL2, DRV260X_SAMP_TIME_250 },
+ { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_ANANLOG_IN },
+ { DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
+};
+
+static const struct reg_default drv260x_erm_cal_regs[] = {
+ { DRV260X_MODE, DRV260X_AUTO_CAL },
+ { DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
+ { DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
+ { DRV260X_A_TO_V_MIN_OUT, DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT },
+ { DRV260X_A_TO_V_MAX_OUT, DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT },
+ { DRV260X_FEEDBACK_CTRL, DRV260X_BRAKE_FACTOR_3X |
+ DRV260X_LOOP_GAIN_MED | DRV260X_BEMF_GAIN_2 },
+ { DRV260X_CTRL1, DRV260X_STARTUP_BOOST },
+ { DRV260X_CTRL2, DRV260X_SAMP_TIME_250 | DRV260X_BLANK_TIME_75 |
+ DRV260X_IDISS_TIME_75 },
+ { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_ERM_OPEN_LOOP },
+ { DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
+};
+
+static int drv260x_init(struct drv260x_data *haptics)
+{
+ int error;
+ unsigned int cal_buf;
+
+ error = regmap_write(haptics->regmap,
+ DRV260X_RATED_VOLT, haptics->rated_voltage);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write DRV260X_RATED_VOLT register: %d\n",
+ error);
+ return error;
+ }
+
+ error = regmap_write(haptics->regmap,
+ DRV260X_OD_CLAMP_VOLT, haptics->overdrive_voltage);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write DRV260X_OD_CLAMP_VOLT register: %d\n",
+ error);
+ return error;
+ }
+
+ switch (haptics->mode) {
+ case DRV260X_LRA_MODE:
+ error = regmap_register_patch(haptics->regmap,
+ drv260x_lra_cal_regs,
+ ARRAY_SIZE(drv260x_lra_cal_regs));
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write LRA calibration registers: %d\n",
+ error);
+ return error;
+ }
+
+ break;
+
+ case DRV260X_ERM_MODE:
+ error = regmap_register_patch(haptics->regmap,
+ drv260x_erm_cal_regs,
+ ARRAY_SIZE(drv260x_erm_cal_regs));
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write ERM calibration registers: %d\n",
+ error);
+ return error;
+ }
+
+ error = regmap_update_bits(haptics->regmap, DRV260X_LIB_SEL,
+ DRV260X_LIB_SEL_MASK,
+ haptics->library);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write DRV260X_LIB_SEL register: %d\n",
+ error);
+ return error;
+ }
+
+ break;
+
+ default:
+ error = regmap_register_patch(haptics->regmap,
+ drv260x_lra_init_regs,
+ ARRAY_SIZE(drv260x_lra_init_regs));
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write LRA init registers: %d\n",
+ error);
+ return error;
+ }
+
+ error = regmap_update_bits(haptics->regmap, DRV260X_LIB_SEL,
+ DRV260X_LIB_SEL_MASK,
+ haptics->library);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write DRV260X_LIB_SEL register: %d\n",
+ error);
+ return error;
+ }
+
+ /* No need to set GO bit here */
+ return 0;
+ }
+
+ error = regmap_write(haptics->regmap, DRV260X_GO, DRV260X_GO_BIT);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write GO register: %d\n",
+ error);
+ return error;
+ }
+
+ do {
+ error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to read GO register: %d\n",
+ error);
+ return error;
+ }
+ } while (cal_buf == DRV260X_GO_BIT);
+
+ return 0;
+}
+
+static const struct regmap_config drv260x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = DRV260X_MAX_REG,
+ .reg_defaults = drv260x_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(drv260x_reg_defs),
+ .cache_type = REGCACHE_NONE,
+};
+
+#ifdef CONFIG_OF
+static int drv260x_parse_dt(struct device *dev,
+ struct drv260x_data *haptics)
+{
+ struct device_node *np = dev->of_node;
+ unsigned int voltage;
+ int error;
+
+ error = of_property_read_u32(np, "mode", &haptics->mode);
+ if (error) {
+ dev_err(dev, "%s: No entry for mode\n", __func__);
+ return error;
+ }
+
+ error = of_property_read_u32(np, "library-sel", &haptics->library);
+ if (error) {
+ dev_err(dev, "%s: No entry for library selection\n",
+ __func__);
+ return error;
+ }
+
+ error = of_property_read_u32(np, "vib-rated-mv", &voltage);
+ if (!error)
+ haptics->rated_voltage = drv260x_calculate_voltage(voltage);
+
+ error = of_property_read_u32(np, "vib-overdrive-mv", &voltage);
+ if (!error)
+ haptics->overdrive_voltage = drv260x_calculate_voltage(voltage);
+
+ return 0;
+}
+#else
+static inline int drv260x_parse_dt(struct device *dev,
+ struct drv260x_data *haptics)
+{
+ dev_err(dev, "no platform data defined\n");
+
+ return -EINVAL;
+}
+#endif
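+
+/*
+ * Illustrative device tree fragment for the properties parsed above
+ * (node name, unit address and values are examples only):
+ *
+ *	haptics@5a {
+ *		compatible = "ti,drv2605l";
+ *		mode = <DRV260X_LRA_MODE>;
+ *		library-sel = <DRV260X_LIB_LRA>;
+ *		vib-rated-mv = <3200>;
+ *		vib-overdrive-mv = <3200>;
+ *	};
+ */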
+
+static int drv260x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct drv260x_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct drv260x_data *haptics;
+ int error;
+
+ haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
+ if (!haptics)
+ return -ENOMEM;
+
+ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
+
+ if (pdata) {
+ haptics->mode = pdata->mode;
+ haptics->library = pdata->library_selection;
+ if (pdata->vib_overdrive_voltage)
+ haptics->overdrive_voltage = drv260x_calculate_voltage(pdata->vib_overdrive_voltage);
+ if (pdata->vib_rated_voltage)
+ haptics->rated_voltage = drv260x_calculate_voltage(pdata->vib_rated_voltage);
+ } else if (client->dev.of_node) {
+ error = drv260x_parse_dt(&client->dev, haptics);
+ if (error)
+ return error;
+ } else {
+ dev_err(&client->dev, "Platform data not set\n");
+ return -ENODEV;
+ }
+
+ if (haptics->mode < DRV260X_LRA_MODE ||
+ haptics->mode > DRV260X_ERM_MODE) {
+ dev_err(&client->dev,
+ "Vibrator mode is invalid: %i\n",
+ haptics->mode);
+ return -EINVAL;
+ }
+
+ if (haptics->library < DRV260X_LIB_EMPTY ||
+ haptics->library > DRV260X_ERM_LIB_F) {
+ dev_err(&client->dev,
+ "Library value is invalid: %i\n", haptics->library);
+ return -EINVAL;
+ }
+
+ if (haptics->mode == DRV260X_LRA_MODE &&
+ haptics->library != DRV260X_LIB_EMPTY &&
+ haptics->library != DRV260X_LIB_LRA) {
+ dev_err(&client->dev,
+ "LRA Mode with ERM Library mismatch\n");
+ return -EINVAL;
+ }
+
+ if (haptics->mode == DRV260X_ERM_MODE &&
+ (haptics->library == DRV260X_LIB_EMPTY ||
+ haptics->library == DRV260X_LIB_LRA)) {
+ dev_err(&client->dev,
+ "ERM Mode with LRA Library mismatch\n");
+ return -EINVAL;
+ }
+
+ haptics->regulator = devm_regulator_get(&client->dev, "vbat");
+ if (IS_ERR(haptics->regulator)) {
+ error = PTR_ERR(haptics->regulator);
+ dev_err(&client->dev,
+ "unable to get regulator, error: %d\n", error);
+ return error;
+ }
+
+ haptics->enable_gpio = devm_gpiod_get(&client->dev, "enable");
+ if (IS_ERR(haptics->enable_gpio)) {
+ error = PTR_ERR(haptics->enable_gpio);
+ if (error != -ENOENT && error != -ENOSYS)
+ return error;
+ haptics->enable_gpio = NULL;
+ } else {
+ gpiod_direction_output(haptics->enable_gpio, 1);
+ }
+
+ haptics->input_dev = devm_input_allocate_device(&client->dev);
+ if (!haptics->input_dev) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ haptics->input_dev->name = "drv260x:haptics";
+ haptics->input_dev->dev.parent = client->dev.parent;
+ haptics->input_dev->close = drv260x_close;
+ input_set_drvdata(haptics->input_dev, haptics);
+ input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
+
+ error = input_ff_create_memless(haptics->input_dev, NULL,
+ drv260x_haptics_play);
+ if (error) {
+ dev_err(&client->dev, "input_ff_create() failed: %d\n",
+ error);
+ return error;
+ }
+
+ INIT_WORK(&haptics->work, drv260x_worker);
+
+ haptics->client = client;
+ i2c_set_clientdata(client, haptics);
+
+ haptics->regmap = devm_regmap_init_i2c(client, &drv260x_regmap_config);
+ if (IS_ERR(haptics->regmap)) {
+ error = PTR_ERR(haptics->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ error = drv260x_init(haptics);
+ if (error) {
+ dev_err(&client->dev, "Device init failed: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(haptics->input_dev);
+ if (error) {
+ dev_err(&client->dev, "couldn't register input device: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int drv260x_suspend(struct device *dev)
+{
+ struct drv260x_data *haptics = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&haptics->input_dev->mutex);
+
+ if (haptics->input_dev->users) {
+ ret = regmap_update_bits(haptics->regmap,
+ DRV260X_MODE,
+ DRV260X_STANDBY_MASK,
+ DRV260X_STANDBY);
+ if (ret) {
+ dev_err(dev, "Failed to set standby mode\n");
+ goto out;
+ }
+
+ gpiod_set_value(haptics->enable_gpio, 0);
+
+ ret = regulator_disable(haptics->regulator);
+ if (ret) {
+ dev_err(dev, "Failed to disable regulator\n");
+ regmap_update_bits(haptics->regmap,
+ DRV260X_MODE,
+ DRV260X_STANDBY_MASK, 0);
+ }
+ }
+out:
+ mutex_unlock(&haptics->input_dev->mutex);
+ return ret;
+}
+
+static int drv260x_resume(struct device *dev)
+{
+ struct drv260x_data *haptics = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&haptics->input_dev->mutex);
+
+ if (haptics->input_dev->users) {
+ ret = regulator_enable(haptics->regulator);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator\n");
+ goto out;
+ }
+
+ ret = regmap_update_bits(haptics->regmap,
+ DRV260X_MODE,
+ DRV260X_STANDBY_MASK, 0);
+ if (ret) {
+ dev_err(dev, "Failed to unset standby mode\n");
+ regulator_disable(haptics->regulator);
+ goto out;
+ }
+
+ gpiod_set_value(haptics->enable_gpio, 1);
+ }
+
+out:
+ mutex_unlock(&haptics->input_dev->mutex);
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(drv260x_pm_ops, drv260x_suspend, drv260x_resume);
+
+static const struct i2c_device_id drv260x_id[] = {
+ { "drv2605l", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, drv260x_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id drv260x_of_match[] = {
+ { .compatible = "ti,drv2604", },
+ { .compatible = "ti,drv2604l", },
+ { .compatible = "ti,drv2605", },
+ { .compatible = "ti,drv2605l", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, drv260x_of_match);
+#endif
+
+static struct i2c_driver drv260x_driver = {
+ .probe = drv260x_probe,
+ .driver = {
+ .name = "drv260x-haptics",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(drv260x_of_match),
+ .pm = &drv260x_pm_ops,
+ },
+ .id_table = drv260x_id,
+};
+module_i2c_driver(drv260x_driver);
+
+MODULE_ALIAS("platform:drv260x-haptics");
+MODULE_DESCRIPTION("TI DRV260x haptics driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
new file mode 100644
index 000000000000..0f437581cc04
--- /dev/null
+++ b/drivers/input/misc/drv2667.c
@@ -0,0 +1,500 @@
+/*
+ * DRV2667 haptics driver family
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2014 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+/* Control registers */
+#define DRV2667_STATUS 0x00
+#define DRV2667_CTRL_1 0x01
+#define DRV2667_CTRL_2 0x02
+/* Waveform sequencer */
+#define DRV2667_WV_SEQ_0 0x03
+#define DRV2667_WV_SEQ_1 0x04
+#define DRV2667_WV_SEQ_2 0x05
+#define DRV2667_WV_SEQ_3 0x06
+#define DRV2667_WV_SEQ_4 0x07
+#define DRV2667_WV_SEQ_5 0x08
+#define DRV2667_WV_SEQ_6 0x09
+#define DRV2667_WV_SEQ_7 0x0A
+#define DRV2667_FIFO 0x0B
+#define DRV2667_PAGE 0xFF
+#define DRV2667_MAX_REG DRV2667_PAGE
+
+#define DRV2667_PAGE_0 0x00
+#define DRV2667_PAGE_1 0x01
+#define DRV2667_PAGE_2 0x02
+#define DRV2667_PAGE_3 0x03
+#define DRV2667_PAGE_4 0x04
+#define DRV2667_PAGE_5 0x05
+#define DRV2667_PAGE_6 0x06
+#define DRV2667_PAGE_7 0x07
+#define DRV2667_PAGE_8 0x08
+
+/* RAM fields */
+#define DRV2667_RAM_HDR_SZ 0x0
+/* RAM Header addresses */
+#define DRV2667_RAM_START_HI 0x01
+#define DRV2667_RAM_START_LO 0x02
+#define DRV2667_RAM_STOP_HI 0x03
+#define DRV2667_RAM_STOP_LO 0x04
+#define DRV2667_RAM_REPEAT_CT 0x05
+/* RAM data addresses */
+#define DRV2667_RAM_AMP 0x06
+#define DRV2667_RAM_FREQ 0x07
+#define DRV2667_RAM_DURATION 0x08
+#define DRV2667_RAM_ENVELOPE 0x09
+
+/* Control 1 Register */
+#define DRV2667_25_VPP_GAIN 0x00
+#define DRV2667_50_VPP_GAIN 0x01
+#define DRV2667_75_VPP_GAIN 0x02
+#define DRV2667_100_VPP_GAIN 0x03
+#define DRV2667_DIGITAL_IN 0xfc
+#define DRV2667_ANALOG_IN (1 << 2)
+
+/* Control 2 Register */
+#define DRV2667_GO (1 << 0)
+#define DRV2667_STANDBY (1 << 6)
+#define DRV2667_DEV_RST (1 << 7)
+
+/* RAM Envelope settings */
+#define DRV2667_NO_ENV 0x00
+#define DRV2667_32_MS_ENV 0x01
+#define DRV2667_64_MS_ENV 0x02
+#define DRV2667_96_MS_ENV 0x03
+#define DRV2667_128_MS_ENV 0x04
+#define DRV2667_160_MS_ENV 0x05
+#define DRV2667_192_MS_ENV 0x06
+#define DRV2667_224_MS_ENV 0x07
+#define DRV2667_256_MS_ENV 0x08
+#define DRV2667_512_MS_ENV 0x09
+#define DRV2667_768_MS_ENV 0x0a
+#define DRV2667_1024_MS_ENV 0x0b
+#define DRV2667_1280_MS_ENV 0x0c
+#define DRV2667_1536_MS_ENV 0x0d
+#define DRV2667_1792_MS_ENV 0x0e
+#define DRV2667_2048_MS_ENV 0x0f
+
+/**
+ * struct drv2667_data - driver data for a DRV2667 haptics device
+ * @input_dev: Pointer to the input device
+ * @client: Pointer to the I2C client
+ * @regmap: Register map of the device
+ * @work: Work item used to offload the enable/disable of the vibration
+ * @regulator: Pointer to the regulator for the IC
+ * @page: RAM page used for the waveform data
+ * @magnitude: Magnitude of the vibration event
+ * @frequency: Sinusoid frequency of the waveform in Hz
+ */
+struct drv2667_data {
+ struct input_dev *input_dev;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct work_struct work;
+ struct regulator *regulator;
+ u32 page;
+ u32 magnitude;
+ u32 frequency;
+};
+
+static struct reg_default drv2667_reg_defs[] = {
+ { DRV2667_STATUS, 0x02 },
+ { DRV2667_CTRL_1, 0x28 },
+ { DRV2667_CTRL_2, 0x40 },
+ { DRV2667_WV_SEQ_0, 0x00 },
+ { DRV2667_WV_SEQ_1, 0x00 },
+ { DRV2667_WV_SEQ_2, 0x00 },
+ { DRV2667_WV_SEQ_3, 0x00 },
+ { DRV2667_WV_SEQ_4, 0x00 },
+ { DRV2667_WV_SEQ_5, 0x00 },
+ { DRV2667_WV_SEQ_6, 0x00 },
+ { DRV2667_WV_SEQ_7, 0x00 },
+ { DRV2667_FIFO, 0x00 },
+ { DRV2667_PAGE, 0x00 },
+};
+
+static int drv2667_set_waveform_freq(struct drv2667_data *haptics)
+{
+ unsigned int read_buf;
+ int freq;
+ int error;
+
+ /* Per the data sheet:
+ * Sinusoid Frequency (Hz) = 7.8125 x Frequency
+ */
+ freq = (haptics->frequency * 1000) / 78125;
+ if (freq <= 0) {
+ dev_err(&haptics->client->dev,
+ "ERROR: Frequency calculated to %i\n", freq);
+ return -EINVAL;
+ }
+
+ error = regmap_read(haptics->regmap, DRV2667_PAGE, &read_buf);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to read the page number: %d\n", error);
+ return -EIO;
+ }
+
+ if (read_buf == DRV2667_PAGE_0 ||
+ haptics->page != read_buf) {
+ error = regmap_write(haptics->regmap,
+ DRV2667_PAGE, haptics->page);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to set the page: %d\n", error);
+ return -EIO;
+ }
+ }
+
+ error = regmap_write(haptics->regmap, DRV2667_RAM_FREQ, freq);
+ if (error)
+ dev_err(&haptics->client->dev,
+ "Failed to set the frequency: %d\n", error);
+
+ /* Reset back to original page */
+ if (read_buf == DRV2667_PAGE_0 ||
+ haptics->page != read_buf) {
+ error = regmap_write(haptics->regmap, DRV2667_PAGE, read_buf);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to set the page: %d\n", error);
+ return -EIO;
+ }
+ }
+
+ return error;
+}
+
+static void drv2667_worker(struct work_struct *work)
+{
+ struct drv2667_data *haptics = container_of(work, struct drv2667_data, work);
+ int error;
+
+ if (haptics->magnitude) {
+ error = regmap_write(haptics->regmap,
+ DRV2667_PAGE, haptics->page);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to set the page: %d\n", error);
+ return;
+ }
+
+ error = regmap_write(haptics->regmap, DRV2667_RAM_AMP,
+ haptics->magnitude);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to set the amplitude: %d\n", error);
+ return;
+ }
+
+ error = regmap_write(haptics->regmap,
+ DRV2667_PAGE, DRV2667_PAGE_0);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to set the page: %d\n", error);
+ return;
+ }
+
+ error = regmap_write(haptics->regmap,
+ DRV2667_CTRL_2, DRV2667_GO);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to set the GO bit: %d\n", error);
+ }
+ } else {
+ error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
+ DRV2667_GO, 0);
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to unset the GO bit: %d\n", error);
+ }
+ }
+}
+
+static int drv2667_haptics_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct drv2667_data *haptics = input_get_drvdata(input);
+
+ if (effect->u.rumble.strong_magnitude > 0)
+ haptics->magnitude = effect->u.rumble.strong_magnitude;
+ else if (effect->u.rumble.weak_magnitude > 0)
+ haptics->magnitude = effect->u.rumble.weak_magnitude;
+ else
+ haptics->magnitude = 0;
+
+ schedule_work(&haptics->work);
+
+ return 0;
+}
+
+static void drv2667_close(struct input_dev *input)
+{
+ struct drv2667_data *haptics = input_get_drvdata(input);
+ int error;
+
+ cancel_work_sync(&haptics->work);
+
+ error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
+ DRV2667_STANDBY, DRV2667_STANDBY);
+ if (error)
+ dev_err(&haptics->client->dev,
+ "Failed to enter standby mode: %d\n", error);
+}
+
+static const struct reg_default drv2667_init_regs[] = {
+ { DRV2667_CTRL_2, 0 },
+ { DRV2667_CTRL_1, DRV2667_25_VPP_GAIN },
+ { DRV2667_WV_SEQ_0, 1 },
+ { DRV2667_WV_SEQ_1, 0 }
+};
+
+static const struct reg_default drv2667_page1_init[] = {
+ { DRV2667_RAM_HDR_SZ, 0x05 },
+ { DRV2667_RAM_START_HI, 0x80 },
+ { DRV2667_RAM_START_LO, 0x06 },
+ { DRV2667_RAM_STOP_HI, 0x00 },
+ { DRV2667_RAM_STOP_LO, 0x09 },
+ { DRV2667_RAM_REPEAT_CT, 0 },
+ { DRV2667_RAM_DURATION, 0x05 },
+ { DRV2667_RAM_ENVELOPE, DRV2667_NO_ENV },
+ { DRV2667_RAM_AMP, 0x60 },
+};
+
+static int drv2667_init(struct drv2667_data *haptics)
+{
+ int error;
+
+ /* Set default haptic frequency to 195 Hz on Page 1 */
+ haptics->frequency = 195;
+ haptics->page = DRV2667_PAGE_1;
+
+ error = regmap_register_patch(haptics->regmap,
+ drv2667_init_regs,
+ ARRAY_SIZE(drv2667_init_regs));
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write init registers: %d\n",
+ error);
+ return error;
+ }
+
+ error = regmap_write(haptics->regmap, DRV2667_PAGE, haptics->page);
+ if (error) {
+ dev_err(&haptics->client->dev, "Failed to set page: %d\n",
+ error);
+ goto error_out;
+ }
+
+ error = drv2667_set_waveform_freq(haptics);
+ if (error)
+ goto error_page;
+
+ error = regmap_register_patch(haptics->regmap,
+ drv2667_page1_init,
+ ARRAY_SIZE(drv2667_page1_init));
+ if (error) {
+ dev_err(&haptics->client->dev,
+ "Failed to write page registers: %d\n",
+ error);
+ return error;
+ }
+
+ error = regmap_write(haptics->regmap, DRV2667_PAGE, DRV2667_PAGE_0);
+ return error;
+
+error_page:
+ regmap_write(haptics->regmap, DRV2667_PAGE, DRV2667_PAGE_0);
+error_out:
+ return error;
+}
+
+static const struct regmap_config drv2667_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = DRV2667_MAX_REG,
+ .reg_defaults = drv2667_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(drv2667_reg_defs),
+ .cache_type = REGCACHE_NONE,
+};
+
+static int drv2667_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct drv2667_data *haptics;
+ int error;
+
+ haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
+ if (!haptics)
+ return -ENOMEM;
+
+ haptics->regulator = devm_regulator_get(&client->dev, "vbat");
+ if (IS_ERR(haptics->regulator)) {
+ error = PTR_ERR(haptics->regulator);
+ dev_err(&client->dev,
+ "unable to get regulator, error: %d\n", error);
+ return error;
+ }
+
+ haptics->input_dev = devm_input_allocate_device(&client->dev);
+ if (!haptics->input_dev) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ haptics->input_dev->name = "drv2667:haptics";
+ haptics->input_dev->dev.parent = client->dev.parent;
+ haptics->input_dev->close = drv2667_close;
+ input_set_drvdata(haptics->input_dev, haptics);
+ input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
+
+ error = input_ff_create_memless(haptics->input_dev, NULL,
+ drv2667_haptics_play);
+ if (error) {
+ dev_err(&client->dev, "input_ff_create() failed: %d\n",
+ error);
+ return error;
+ }
+
+ INIT_WORK(&haptics->work, drv2667_worker);
+
+ haptics->client = client;
+ i2c_set_clientdata(client, haptics);
+
+ haptics->regmap = devm_regmap_init_i2c(client, &drv2667_regmap_config);
+ if (IS_ERR(haptics->regmap)) {
+ error = PTR_ERR(haptics->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ error = drv2667_init(haptics);
+ if (error) {
+ dev_err(&client->dev, "Device init failed: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(haptics->input_dev);
+ if (error) {
+ dev_err(&client->dev, "couldn't register input device: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int drv2667_suspend(struct device *dev)
+{
+ struct drv2667_data *haptics = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&haptics->input_dev->mutex);
+
+ if (haptics->input_dev->users) {
+ ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
+ DRV2667_STANDBY, DRV2667_STANDBY);
+ if (ret) {
+ dev_err(dev, "Failed to set standby mode\n");
+ regulator_disable(haptics->regulator);
+ goto out;
+ }
+
+ ret = regulator_disable(haptics->regulator);
+ if (ret) {
+ dev_err(dev, "Failed to disable regulator\n");
+ regmap_update_bits(haptics->regmap,
+ DRV2667_CTRL_2,
+ DRV2667_STANDBY, 0);
+ }
+ }
+out:
+ mutex_unlock(&haptics->input_dev->mutex);
+ return ret;
+}
+
+static int drv2667_resume(struct device *dev)
+{
+ struct drv2667_data *haptics = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&haptics->input_dev->mutex);
+
+ if (haptics->input_dev->users) {
+ ret = regulator_enable(haptics->regulator);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator\n");
+ goto out;
+ }
+
+ ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
+ DRV2667_STANDBY, 0);
+ if (ret) {
+ dev_err(dev, "Failed to unset standby mode\n");
+ regulator_disable(haptics->regulator);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&haptics->input_dev->mutex);
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(drv2667_pm_ops, drv2667_suspend, drv2667_resume);
+
+static const struct i2c_device_id drv2667_id[] = {
+ { "drv2667", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, drv2667_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id drv2667_of_match[] = {
+ { .compatible = "ti,drv2667", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, drv2667_of_match);
+#endif
+
+static struct i2c_driver drv2667_driver = {
+ .probe = drv2667_probe,
+ .driver = {
+ .name = "drv2667-haptics",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(drv2667_of_match),
+ .pm = &drv2667_pm_ops,
+ },
+ .id_table = drv2667_id,
+};
+module_i2c_driver(drv2667_driver);
+
+MODULE_ALIAS("platform:drv2667-haptics");
+MODULE_DESCRIPTION("TI DRV2667 haptics driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
new file mode 100644
index 000000000000..d605db4d2f39
--- /dev/null
+++ b/drivers/input/misc/max77693-haptic.c
@@ -0,0 +1,357 @@
+/*
+ * MAXIM MAX77693 Haptic device driver
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Jaewon Kim <jaewon02.kim@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+
+#define MAX_MAGNITUDE_SHIFT 16
+
+enum max77693_haptic_motor_type {
+ MAX77693_HAPTIC_ERM = 0,
+ MAX77693_HAPTIC_LRA,
+};
+
+enum max77693_haptic_pulse_mode {
+ MAX77693_HAPTIC_EXTERNAL_MODE = 0,
+ MAX77693_HAPTIC_INTERNAL_MODE,
+};
+
+enum max77693_haptic_pwm_divisor {
+ MAX77693_HAPTIC_PWM_DIVISOR_32 = 0,
+ MAX77693_HAPTIC_PWM_DIVISOR_64,
+ MAX77693_HAPTIC_PWM_DIVISOR_128,
+ MAX77693_HAPTIC_PWM_DIVISOR_256,
+};
+
+struct max77693_haptic {
+ struct regmap *regmap_pmic;
+ struct regmap *regmap_haptic;
+ struct device *dev;
+ struct input_dev *input_dev;
+ struct pwm_device *pwm_dev;
+ struct regulator *motor_reg;
+
+ bool enabled;
+ bool suspend_state;
+ unsigned int magnitude;
+ unsigned int pwm_duty;
+ enum max77693_haptic_motor_type type;
+ enum max77693_haptic_pulse_mode mode;
+ enum max77693_haptic_pwm_divisor pwm_divisor;
+
+ struct work_struct work;
+};
+
+static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
+{
+ int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2;
+ int error;
+
+ error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period);
+ if (error) {
+ dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int max77693_haptic_configure(struct max77693_haptic *haptic,
+ bool enable)
+{
+ unsigned int value;
+ int error;
+
+ value = ((haptic->type << MAX77693_CONFIG2_MODE) |
+ (enable << MAX77693_CONFIG2_MEN) |
+ (haptic->mode << MAX77693_CONFIG2_HTYP) |
+ (haptic->pwm_divisor));
+
+ error = regmap_write(haptic->regmap_haptic,
+ MAX77693_HAPTIC_REG_CONFIG2, value);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to update haptic config: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int max77693_haptic_lowsys(struct max77693_haptic *haptic, bool enable)
+{
+ int error;
+
+ error = regmap_update_bits(haptic->regmap_pmic,
+ MAX77693_PMIC_REG_LSCNFG,
+ MAX77693_PMIC_LOW_SYS_MASK,
+ enable << MAX77693_PMIC_LOW_SYS_SHIFT);
+ if (error) {
+ dev_err(haptic->dev, "cannot update pmic regmap: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void max77693_haptic_enable(struct max77693_haptic *haptic)
+{
+ int error;
+
+ if (haptic->enabled)
+ return;
+
+ error = pwm_enable(haptic->pwm_dev);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to enable haptic pwm device: %d\n", error);
+ return;
+ }
+
+ error = max77693_haptic_lowsys(haptic, true);
+ if (error)
+ goto err_enable_lowsys;
+
+ error = max77693_haptic_configure(haptic, true);
+ if (error)
+ goto err_enable_config;
+
+ haptic->enabled = true;
+
+ return;
+
+err_enable_config:
+ max77693_haptic_lowsys(haptic, false);
+err_enable_lowsys:
+ pwm_disable(haptic->pwm_dev);
+}
+
+static void max77693_haptic_disable(struct max77693_haptic *haptic)
+{
+ int error;
+
+ if (!haptic->enabled)
+ return;
+
+ error = max77693_haptic_configure(haptic, false);
+ if (error)
+ return;
+
+ error = max77693_haptic_lowsys(haptic, false);
+ if (error)
+ goto err_disable_lowsys;
+
+ pwm_disable(haptic->pwm_dev);
+ haptic->enabled = false;
+
+ return;
+
+err_disable_lowsys:
+ max77693_haptic_configure(haptic, true);
+}
+
+static void max77693_haptic_play_work(struct work_struct *work)
+{
+ struct max77693_haptic *haptic =
+ container_of(work, struct max77693_haptic, work);
+ int error;
+
+ error = max77693_haptic_set_duty_cycle(haptic);
+ if (error) {
+ dev_err(haptic->dev, "failed to set duty cycle: %d\n", error);
+ return;
+ }
+
+ if (haptic->magnitude)
+ max77693_haptic_enable(haptic);
+ else
+ max77693_haptic_disable(haptic);
+}
+
+static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct max77693_haptic *haptic = input_get_drvdata(dev);
+ uint64_t period_mag_multi;
+
+ haptic->magnitude = effect->u.rumble.strong_magnitude;
+ if (!haptic->magnitude)
+ haptic->magnitude = effect->u.rumble.weak_magnitude;
+
+ /*
+ * The magnitude comes from force-feedback interface.
+ * The formula to convert magnitude to pwm_duty is as follows:
+ * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF)
+ */
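+ /*
+ * For example (illustrative numbers only): with a 38000 ns PWM period and
+ * a strong magnitude of 0x8000, pwm_duty = (38000 * 0x8000) >> 16 = 19000 ns,
+ * i.e. roughly a 50% duty cycle.
+ */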
+ period_mag_multi = (int64_t)(haptic->pwm_dev->period *
+ haptic->magnitude);
+ haptic->pwm_duty = (unsigned int)(period_mag_multi >>
+ MAX_MAGNITUDE_SHIFT);
+
+ schedule_work(&haptic->work);
+
+ return 0;
+}
+
+static int max77693_haptic_open(struct input_dev *dev)
+{
+ struct max77693_haptic *haptic = input_get_drvdata(dev);
+ int error;
+
+ error = regulator_enable(haptic->motor_reg);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void max77693_haptic_close(struct input_dev *dev)
+{
+ struct max77693_haptic *haptic = input_get_drvdata(dev);
+ int error;
+
+ cancel_work_sync(&haptic->work);
+ max77693_haptic_disable(haptic);
+
+ error = regulator_disable(haptic->motor_reg);
+ if (error)
+ dev_err(haptic->dev,
+ "failed to disable regulator: %d\n", error);
+}
+
+static int max77693_haptic_probe(struct platform_device *pdev)
+{
+ struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
+ struct max77693_haptic *haptic;
+ int error;
+
+ haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
+ if (!haptic)
+ return -ENOMEM;
+
+ haptic->regmap_pmic = max77693->regmap;
+ haptic->regmap_haptic = max77693->regmap_haptic;
+ haptic->dev = &pdev->dev;
+ haptic->type = MAX77693_HAPTIC_LRA;
+ haptic->mode = MAX77693_HAPTIC_EXTERNAL_MODE;
+ haptic->pwm_divisor = MAX77693_HAPTIC_PWM_DIVISOR_128;
+ haptic->suspend_state = false;
+
+ INIT_WORK(&haptic->work, max77693_haptic_play_work);
+
+ /* Get pwm and regulator for haptic device */
+ haptic->pwm_dev = devm_pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(haptic->pwm_dev)) {
+ dev_err(&pdev->dev, "failed to get pwm device\n");
+ return PTR_ERR(haptic->pwm_dev);
+ }
+
+ haptic->motor_reg = devm_regulator_get(&pdev->dev, "haptic");
+ if (IS_ERR(haptic->motor_reg)) {
+ dev_err(&pdev->dev, "failed to get regulator\n");
+ return PTR_ERR(haptic->motor_reg);
+ }
+
+ /* Initialize input device for haptic device */
+ haptic->input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!haptic->input_dev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ haptic->input_dev->name = "max77693-haptic";
+ haptic->input_dev->id.version = 1;
+ haptic->input_dev->dev.parent = &pdev->dev;
+ haptic->input_dev->open = max77693_haptic_open;
+ haptic->input_dev->close = max77693_haptic_close;
+ input_set_drvdata(haptic->input_dev, haptic);
+ input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);
+
+ error = input_ff_create_memless(haptic->input_dev, NULL,
+ max77693_haptic_play_effect);
+ if (error) {
+ dev_err(&pdev->dev, "failed to create force-feedback\n");
+ return error;
+ }
+
+ error = input_register_device(haptic->input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ platform_set_drvdata(pdev, haptic);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max77693_haptic_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max77693_haptic *haptic = platform_get_drvdata(pdev);
+
+ if (haptic->enabled) {
+ max77693_haptic_disable(haptic);
+ haptic->suspend_state = true;
+ }
+
+ return 0;
+}
+
+static int max77693_haptic_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max77693_haptic *haptic = platform_get_drvdata(pdev);
+
+ if (haptic->suspend_state) {
+ max77693_haptic_enable(haptic);
+ haptic->suspend_state = false;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
+ max77693_haptic_suspend, max77693_haptic_resume);
+
+static struct platform_driver max77693_haptic_driver = {
+ .driver = {
+ .name = "max77693-haptic",
+ .owner = THIS_MODULE,
+ .pm = &max77693_haptic_pm_ops,
+ },
+ .probe = max77693_haptic_probe,
+};
+module_platform_driver(max77693_haptic_driver);
+
+MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
+MODULE_DESCRIPTION("MAXIM MAX77693 Haptic driver");
+MODULE_ALIAS("platform:max77693-haptic");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
new file mode 100644
index 000000000000..f505ac3a8d87
--- /dev/null
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -0,0 +1,332 @@
+/*
+ * Texas Instruments' Palmas Power Button Input Driver
+ *
+ * Copyright (C) 2012-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Girish S Ghongdemath
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/palmas.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PALMAS_LPK_TIME_MASK 0x0c
+#define PALMAS_PWRON_DEBOUNCE_MASK 0x03
+#define PALMAS_PWR_KEY_Q_TIME_MS 20
+
+/**
+ * struct palmas_pwron - Palmas power on data
+ * @palmas: pointer to palmas device
+ * @input_dev: pointer to input device
+ * @input_work: work for detecting release of key
+ * @irq: irq that we are hooked on to
+ */
+struct palmas_pwron {
+ struct palmas *palmas;
+ struct input_dev *input_dev;
+ struct delayed_work input_work;
+ int irq;
+};
+
+/**
+ * struct palmas_pwron_config - configuration of palmas power on
+ * @long_press_time_val: value for long press h/w shutdown event
+ * @pwron_debounce_val: value for debounce of power button
+ */
+struct palmas_pwron_config {
+ u8 long_press_time_val;
+ u8 pwron_debounce_val;
+};
+
+/**
+ * palmas_power_button_work() - Detects the button release event
+ * @work: work item to detect button release
+ */
+static void palmas_power_button_work(struct work_struct *work)
+{
+ struct palmas_pwron *pwron = container_of(work,
+ struct palmas_pwron,
+ input_work.work);
+ struct input_dev *input_dev = pwron->input_dev;
+ unsigned int reg;
+ int error;
+
+ error = palmas_read(pwron->palmas, PALMAS_INTERRUPT_BASE,
+ PALMAS_INT1_LINE_STATE, &reg);
+ if (error) {
+ dev_err(input_dev->dev.parent,
+ "Cannot read palmas PWRON status: %d\n", error);
+ } else if (reg & BIT(1)) {
+ /* The button is released, report event. */
+ input_report_key(input_dev, KEY_POWER, 0);
+ input_sync(input_dev);
+ } else {
+ /* The button is still depressed, keep checking. */
+ schedule_delayed_work(&pwron->input_work,
+ msecs_to_jiffies(PALMAS_PWR_KEY_Q_TIME_MS));
+ }
+}
+
+/**
+ * pwron_irq() - button press isr
+ * @irq: irq
+ * @palmas_pwron: pwron struct
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t pwron_irq(int irq, void *palmas_pwron)
+{
+ struct palmas_pwron *pwron = palmas_pwron;
+ struct input_dev *input_dev = pwron->input_dev;
+
+ input_report_key(input_dev, KEY_POWER, 1);
+ pm_wakeup_event(input_dev->dev.parent, 0);
+ input_sync(input_dev);
+
+ mod_delayed_work(system_wq, &pwron->input_work,
+ msecs_to_jiffies(PALMAS_PWR_KEY_Q_TIME_MS));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * palmas_pwron_params_ofinit() - device tree parameter parser
+ * @dev: palmas button device
+ * @config: configuration params that this fills up
+ */
+static void palmas_pwron_params_ofinit(struct device *dev,
+ struct palmas_pwron_config *config)
+{
+ struct device_node *np;
+ u32 val;
+ int i, error;
+ u8 lpk_times[] = { 6, 8, 10, 12 };
+ int pwr_on_deb_ms[] = { 15, 100, 500, 1000 };
+
+ memset(config, 0, sizeof(*config));
+
+ /* Default config parameters */
+ config->long_press_time_val = ARRAY_SIZE(lpk_times) - 1;
+
+ np = dev->of_node;
+ if (!np)
+ return;
+
+ error = of_property_read_u32(np, "ti,palmas-long-press-seconds", &val);
+ if (!error) {
+ for (i = 0; i < ARRAY_SIZE(lpk_times); i++) {
+ if (val <= lpk_times[i]) {
+ config->long_press_time_val = i;
+ break;
+ }
+ }
+ }
+
+ error = of_property_read_u32(np,
+ "ti,palmas-pwron-debounce-milli-seconds",
+ &val);
+ if (!error) {
+ for (i = 0; i < ARRAY_SIZE(pwr_on_deb_ms); i++) {
+ if (val <= pwr_on_deb_ms[i]) {
+ config->pwron_debounce_val = i;
+ break;
+ }
+ }
+ }
+
+ dev_info(dev, "h/w controlled shutdown duration=%d seconds\n",
+ lpk_times[config->long_press_time_val]);
+}
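+
+/*
+ * Illustrative device tree fragment for the optional properties parsed
+ * above (values are examples only and are rounded up to the nearest
+ * supported setting):
+ *
+ *	ti,palmas-long-press-seconds = <12>;
+ *	ti,palmas-pwron-debounce-milli-seconds = <15>;
+ */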
+
+/**
+ * palmas_pwron_probe() - probe
+ * @pdev: platform device for the button
+ *
+ * Return: 0 for successful probe else appropriate error
+ */
+static int palmas_pwron_probe(struct platform_device *pdev)
+{
+ struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct input_dev *input_dev;
+ struct palmas_pwron *pwron;
+ struct palmas_pwron_config config;
+ int val;
+ int error;
+
+ palmas_pwron_params_ofinit(dev, &config);
+
+ pwron = kzalloc(sizeof(*pwron), GFP_KERNEL);
+ if (!pwron)
+ return -ENOMEM;
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(dev, "Can't allocate power button\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ input_dev->name = "palmas_pwron";
+ input_dev->phys = "palmas_pwron/input0";
+ input_dev->dev.parent = dev;
+
+ input_set_capability(input_dev, EV_KEY, KEY_POWER);
+
+ /*
+ * Setup default hardware shutdown option (long key press)
+ * and debounce.
+ */
+ val = config.long_press_time_val << __ffs(PALMAS_LPK_TIME_MASK);
+ val |= config.pwron_debounce_val << __ffs(PALMAS_PWRON_DEBOUNCE_MASK);
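+ /*
+ * For example (illustrative values): the default long_press_time_val of 3
+ * and pwron_debounce_val of 0 give val = (3 << 2) | (0 << 0) = 0x0c, i.e.
+ * a 12 second hardware shutdown time with a 15 ms debounce.
+ */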
+ error = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+ PALMAS_LONG_PRESS_KEY,
+ PALMAS_LPK_TIME_MASK |
+ PALMAS_PWRON_DEBOUNCE_MASK,
+ val);
+ if (error) {
+ dev_err(dev, "LONG_PRESS_KEY_UPDATE failed: %d\n", error);
+ goto err_free_input;
+ }
+
+ pwron->palmas = palmas;
+ pwron->input_dev = input_dev;
+
+ INIT_DELAYED_WORK(&pwron->input_work, palmas_power_button_work);
+
+ pwron->irq = platform_get_irq(pdev, 0);
+ error = request_threaded_irq(pwron->irq, NULL, pwron_irq,
+ IRQF_TRIGGER_HIGH |
+ IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT,
+ dev_name(dev), pwron);
+ if (error) {
+ dev_err(dev, "Can't get IRQ for pwron: %d\n", error);
+ goto err_free_input;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(dev, "Can't register power button: %d\n", error);
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, pwron);
+ device_init_wakeup(dev, true);
+
+ return 0;
+
+err_free_irq:
+ cancel_delayed_work_sync(&pwron->input_work);
+ free_irq(pwron->irq, pwron);
+err_free_input:
+ input_free_device(input_dev);
+err_free_mem:
+ kfree(pwron);
+ return error;
+}
+
+/**
+ * palmas_pwron_remove() - Cleanup on removal
+ * @pdev: platform device for the button
+ *
+ * Return: 0
+ */
+static int palmas_pwron_remove(struct platform_device *pdev)
+{
+ struct palmas_pwron *pwron = platform_get_drvdata(pdev);
+
+ free_irq(pwron->irq, pwron);
+ cancel_delayed_work_sync(&pwron->input_work);
+
+ input_unregister_device(pwron->input_dev);
+ kfree(pwron);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * palmas_pwron_suspend() - suspend handler
+ * @dev: power button device
+ *
+ * Cancel all pending work items for the power button, setup irq for wakeup
+ *
+ * Return: 0
+ */
+static int palmas_pwron_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct palmas_pwron *pwron = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&pwron->input_work);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(pwron->irq);
+
+ return 0;
+}
+
+/**
+ * palmas_pwron_resume() - resume handler
+ * @dev: power button device
+ *
+ * Just disable the wakeup capability of irq here.
+ *
+ * Return: 0
+ */
+static int palmas_pwron_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct palmas_pwron *pwron = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(pwron->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(palmas_pwron_pm,
+ palmas_pwron_suspend, palmas_pwron_resume);
+
+#ifdef CONFIG_OF
+static struct of_device_id of_palmas_pwr_match[] = {
+ { .compatible = "ti,palmas-pwrbutton" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, of_palmas_pwr_match);
+#endif
+
+static struct platform_driver palmas_pwron_driver = {
+ .probe = palmas_pwron_probe,
+ .remove = palmas_pwron_remove,
+ .driver = {
+ .name = "palmas_pwrbutton",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_palmas_pwr_match),
+ .pm = &palmas_pwron_pm,
+ },
+};
+module_platform_driver(palmas_pwron_driver);
+
+MODULE_ALIAS("platform:palmas-pwrbutton");
+MODULE_DESCRIPTION("Palmas Power Button");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Texas Instruments Inc.");
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index e34dfc29beb3..735604753568 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -18,7 +18,7 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio_keys.h>
#include <linux/platform_device.h>
-#include <linux/pnp.h>
+#include <linux/acpi.h>
/*
* Definition of buttons on the tablet. The ACPI index of each button
@@ -67,7 +67,7 @@ static int soc_button_lookup_gpio(struct device *dev, int acpi_index)
}
static struct platform_device *
-soc_button_device_create(struct pnp_dev *pdev,
+soc_button_device_create(struct platform_device *pdev,
const struct soc_button_info *button_info,
bool autorepeat)
{
@@ -138,30 +138,40 @@ err_free_mem:
return ERR_PTR(error);
}
-static void soc_button_remove(struct pnp_dev *pdev)
+static int soc_button_remove(struct platform_device *pdev)
{
- struct soc_button_data *priv = pnp_get_drvdata(pdev);
+ struct soc_button_data *priv = platform_get_drvdata(pdev);
+
int i;
for (i = 0; i < BUTTON_TYPES; i++)
if (priv->children[i])
platform_device_unregister(priv->children[i]);
+
+ return 0;
}
-static int soc_button_pnp_probe(struct pnp_dev *pdev,
- const struct pnp_device_id *id)
+static int soc_button_probe(struct platform_device *pdev)
{
- const struct soc_button_info *button_info = (void *)id->driver_data;
+ struct device *dev = &pdev->dev;
+ const struct acpi_device_id *id;
+ struct soc_button_info *button_info;
struct soc_button_data *priv;
struct platform_device *pd;
int i;
int error;
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ button_info = (struct soc_button_info *)id->driver_data;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- pnp_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, priv);
for (i = 0; i < BUTTON_TYPES; i++) {
pd = soc_button_device_create(pdev, button_info, i == 0);
@@ -192,30 +202,22 @@ static struct soc_button_info soc_button_PNP0C40[] = {
{ }
};
-static const struct pnp_device_id soc_button_pnp_match[] = {
- { .id = "PNP0C40", .driver_data = (long)soc_button_PNP0C40 },
- { .id = "" }
+static const struct acpi_device_id soc_button_acpi_match[] = {
+ { "PNP0C40", (unsigned long)soc_button_PNP0C40 },
+ { }
};
-MODULE_DEVICE_TABLE(pnp, soc_button_pnp_match);
-static struct pnp_driver soc_button_pnp_driver = {
- .name = KBUILD_MODNAME,
- .id_table = soc_button_pnp_match,
- .probe = soc_button_pnp_probe,
+MODULE_DEVICE_TABLE(acpi, soc_button_acpi_match);
+
+static struct platform_driver soc_button_driver = {
+ .probe = soc_button_probe,
.remove = soc_button_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(soc_button_acpi_match),
+ },
};
-
-static int __init soc_button_init(void)
-{
- return pnp_register_driver(&soc_button_pnp_driver);
-}
-
-static void __exit soc_button_exit(void)
-{
- pnp_unregister_driver(&soc_button_pnp_driver);
-}
-
-module_init(soc_button_init);
-module_exit(soc_button_exit);
+module_platform_driver(soc_button_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index c25efdb3f288..dda507f8b3a2 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_MOUSE_SYNAPTICS_I2C) += synaptics_i2c.o
obj-$(CONFIG_MOUSE_SYNAPTICS_USB) += synaptics_usb.o
obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
-psmouse-objs := psmouse-base.o synaptics.o
+psmouse-objs := psmouse-base.o synaptics.o focaltech.o
psmouse-$(CONFIG_MOUSE_PS2_ALPS) += alps.o
psmouse-$(CONFIG_MOUSE_PS2_ELANTECH) += elantech.o
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
new file mode 100644
index 000000000000..f4d657ee1cc0
--- /dev/null
+++ b/drivers/input/mouse/focaltech.c
@@ -0,0 +1,52 @@
+/*
+ * Focaltech TouchPad PS/2 mouse driver
+ *
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Red Hat authors:
+ *
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+/*
+ * The Focaltech PS/2 touchpad protocol is unknown. This driver deals with
+ * detection only, to avoid further detection attempts from confusing the
+ * touchpad; this way it at least works in PS/2 mouse compatibility mode.
+ */
+
+#include <linux/device.h>
+#include <linux/libps2.h>
+#include "psmouse.h"
+
+static const char * const focaltech_pnp_ids[] = {
+ "FLT0101",
+ "FLT0102",
+ "FLT0103",
+ NULL
+};
+
+int focaltech_detect(struct psmouse *psmouse, bool set_properties)
+{
+ if (!psmouse_matches_pnp_id(psmouse, focaltech_pnp_ids))
+ return -ENODEV;
+
+ if (set_properties) {
+ psmouse->vendor = "FocalTech";
+ psmouse->name = "FocalTech Touchpad in mouse emulation mode";
+ }
+
+ return 0;
+}
+
+int focaltech_init(struct psmouse *psmouse)
+{
+ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
+ psmouse_reset(psmouse);
+
+ return 0;
+}
diff --git a/drivers/input/mouse/focaltech.h b/drivers/input/mouse/focaltech.h
new file mode 100644
index 000000000000..498650c61e28
--- /dev/null
+++ b/drivers/input/mouse/focaltech.h
@@ -0,0 +1,22 @@
+/*
+ * Focaltech TouchPad PS/2 mouse driver
+ *
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Red Hat authors:
+ *
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef _FOCALTECH_H
+#define _FOCALTECH_H
+
+int focaltech_detect(struct psmouse *psmouse, bool set_properties);
+int focaltech_init(struct psmouse *psmouse);
+
+#endif
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index b4e1f014ddc2..26994f6a2b2a 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -35,6 +35,7 @@
#include "elantech.h"
#include "sentelic.h"
#include "cypress_ps2.h"
+#include "focaltech.h"
#define DRIVER_DESC "PS/2 mouse driver"
@@ -462,6 +463,20 @@ static int psmouse_poll(struct psmouse *psmouse)
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
}
+/*
+ * psmouse_matches_pnp_id - check if psmouse matches one of the passed in ids.
+ */
+bool psmouse_matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
+{
+ int i;
+
+ if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
+ for (i = 0; ids[i]; i++)
+ if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
+ return true;
+
+ return false;
+}
/*
* Genius NetMouse magic init.
@@ -708,6 +723,21 @@ static int psmouse_extensions(struct psmouse *psmouse,
{
bool synaptics_hardware = false;
+ /* Always check for focaltech; this is safe as it uses pnp-id matching */
+ if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
+ if (!set_properties || focaltech_init(psmouse) == 0) {
+ /*
+ * Not supported yet, use bare protocol.
+ * Note that we need to also restrict
+ * psmouse_max_proto so that psmouse_initialize()
+ * does not try to reset rate and resolution,
+ * because even that upsets the device.
+ */
+ psmouse_max_proto = PSMOUSE_PS2;
+ return PSMOUSE_PS2;
+ }
+ }
+
/*
* We always check for lifebook because it does not disturb mouse
* (it only checks DMI information).
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 2f0b39d59a9b..f4cf664c7db3 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -108,6 +108,7 @@ void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution);
psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse);
int psmouse_activate(struct psmouse *psmouse);
int psmouse_deactivate(struct psmouse *psmouse);
+bool psmouse_matches_pnp_id(struct psmouse *psmouse, const char * const ids[]);
struct psmouse_attribute {
struct device_attribute dattr;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index fd23181c1fb7..6394d9b5bfd3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -185,18 +185,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
NULL
};
-static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
-{
- int i;
-
- if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
- for (i = 0; ids[i]; i++)
- if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
- return true;
-
- return false;
-}
-
/*****************************************************************************
* Synaptics communications functions
****************************************************************************/
@@ -362,7 +350,8 @@ static int synaptics_resolution(struct psmouse *psmouse)
}
for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
- if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
+ if (psmouse_matches_pnp_id(psmouse,
+ min_max_pnpid_table[i].pnp_ids)) {
priv->x_min = min_max_pnpid_table[i].x_min;
priv->x_max = min_max_pnpid_table[i].x_max;
priv->y_min = min_max_pnpid_table[i].y_min;
@@ -1492,7 +1481,7 @@ static void set_input_params(struct psmouse *psmouse,
if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
- if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+ if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
__set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
/* Clickpads report only left button */
__clear_bit(BTN_RIGHT, dev->keybit);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 713e3ddb43bd..40b7d6c0ff17 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -466,6 +466,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Asus X450LCP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ },
+ {
/* Avatar AVIU-145A6 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 5f578e850fc5..90d734bbf467 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -402,9 +402,11 @@ static void __mn_flush_page(struct mmu_notifier *mn,
static int mn_clear_flush_young(struct mmu_notifier *mn,
struct mm_struct *mm,
- unsigned long address)
+ unsigned long start,
+ unsigned long end)
{
- __mn_flush_page(mn, address);
+ for (; start < end; start += PAGE_SIZE)
+ __mn_flush_page(mn, start);
return 0;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index b8632bf9a7f3..a31a9e40eed9 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -109,7 +109,7 @@ config XTENSA_MX
config IRQ_CROSSBAR
bool
help
- Support for a CROSSBAR ip that preceeds the main interrupt controller.
+ Support for a CROSSBAR ip that precedes the main interrupt controller.
The primary irqchip invokes the crossbar's callback which in turn allocates
a free irq and configures the IP. Thus the peripheral interrupts are
routed to one of the free irqchip interrupt lines.
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d7690f86fdb9..55de4f6f7eaf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
has_nonrot_disk = 0;
choose_next_idle = 0;
- if (conf->mddev->recovery_cp < MaxSector &&
- (this_sector + sectors >= conf->next_resync))
- choose_first = 1;
- else
- choose_first = 0;
+ choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
sector_t dist;
@@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-static void raise_barrier(struct r1conf *conf)
+static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
spin_lock_irq(&conf->resync_lock);
@@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf)
/* block any new IO from starting */
conf->barrier++;
+ conf->next_resync = sector_nr;
/* For these conditions we must wait:
* A: while the array is in frozen state
@@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf)
* C: next_resync + RESYNC_SECTORS > start_next_window, meaning
* next resync will reach to the window which normal bios are
* handling.
+ * D: while there are any active requests in the current window.
*/
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
conf->barrier < RESYNC_DEPTH &&
+ conf->current_window_requests == 0 &&
(conf->start_next_window >=
conf->next_resync + RESYNC_SECTORS),
conf->resync_lock);
+ conf->nr_pending++;
spin_unlock_irq(&conf->resync_lock);
}
@@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf)
BUG_ON(conf->barrier <= 0);
spin_lock_irqsave(&conf->resync_lock, flags);
conf->barrier--;
+ conf->nr_pending--;
spin_unlock_irqrestore(&conf->resync_lock, flags);
wake_up(&conf->wait_barrier);
}
@@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
if (conf->array_frozen || !bio)
wait = true;
else if (conf->barrier && bio_data_dir(bio) == WRITE) {
- if (conf->next_resync < RESYNC_WINDOW_SECTORS)
- wait = true;
- else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
- >= bio_end_sector(bio)) ||
- (conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_iter.bi_sector))
+ if ((conf->mddev->curr_resync_completed
+ >= bio_end_sector(bio)) ||
+ (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ <= bio->bi_iter.bi_sector))
wait = false;
else
wait = true;
@@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
}
if (bio && bio_data_dir(bio) == WRITE) {
- if (conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_iter.bi_sector) {
+ if (bio->bi_iter.bi_sector >=
+ conf->mddev->curr_resync_completed) {
if (conf->start_next_window == MaxSector)
conf->start_next_window =
conf->next_resync +
@@ -1186,6 +1185,7 @@ read_again:
atomic_read(&bitmap->behind_writes) == 0);
}
r1_bio->read_disk = rdisk;
+ r1_bio->start_next_window = 0;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf)
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
+ spin_lock_irq(&conf->resync_lock);
conf->next_resync = 0;
conf->start_next_window = MaxSector;
+ conf->current_window_requests +=
+ conf->next_window_requests;
+ conf->next_window_requests = 0;
+ spin_unlock_irq(&conf->resync_lock);
}
static int raid1_spare_active(struct mddev *mddev)
@@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
- test_bit(In_sync, &rdev->flags))
+ !test_bit(Faulty, &rdev->flags))
r1_sync_page_io(rdev, sect, s,
conf->tmppage, WRITE);
}
@@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
+ !test_bit(Faulty, &rdev->flags)) {
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
@@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
- raise_barrier(conf);
- conf->next_resync = sector_nr;
+ raise_barrier(conf, sector_nr);
rcu_read_lock();
/*
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 183588b11fc1..9f0fbecd1eb5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -64,6 +64,10 @@
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
+static bool devices_handle_discard_safely = false;
+module_param(devices_handle_discard_safely, bool, 0644);
+MODULE_PARM_DESC(devices_handle_discard_safely,
+ "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
/*
* Stripe cache
@@ -6208,7 +6212,7 @@ static int run(struct mddev *mddev)
mddev->queue->limits.discard_granularity = stripe;
/*
* unaligned part of discard request will be ignored, so can't
- * guarantee discard_zerors_data
+ * guarantee discard_zeroes_data
*/
mddev->queue->limits.discard_zeroes_data = 0;
@@ -6233,6 +6237,18 @@ static int run(struct mddev *mddev)
!bdev_get_queue(rdev->bdev)->
limits.discard_zeroes_data)
discard_supported = false;
+ /* Unfortunately, discard_zeroes_data is not currently
+ * a guarantee - just a hint. So we only allow DISCARD
+ * if the sysadmin has confirmed that only safe devices
+ * are in use by setting a module parameter.
+ */
+ if (!devices_handle_discard_safely) {
+ if (discard_supported) {
+ pr_info("md/raid456: discard support disabled due to uncertainty.\n");
+ pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
+ }
+ discard_supported = false;
+ }
}
if (discard_supported &&
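Because devices_handle_discard_safely is declared with mode 0644, it can also be flipped at runtime through sysfs rather than only at module load. The sketch below is illustrative only: it assumes raid5.c is built as the raid456 module (as the pr_info text above implies) and that the usual /sys/module parameter path applies; note that the check in the hunk above runs from run(), so a runtime change only affects arrays assembled afterwards.

#include <stdio.h>

int main(void)
{
	/* Path assumed from the module name hinted at in the pr_info above. */
	FILE *f = fopen("/sys/module/raid456/parameters/devices_handle_discard_safely", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("Y\n", f);	/* opt back in to DISCARD on trustworthy devices */
	fclose(f);
	return 0;
}

The same opt-in can be given at boot as raid456.devices_handle_discard_safely=Y, which matches the form the warning message suggests.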
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 103ef6bad2e2..be763150b8aa 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -1490,6 +1490,7 @@ static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
{
struct v4l2_ctrl_config cfg;
+ memset(&cfg, 0, sizeof(cfg));
cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags);
cfg.ops = &cx2341x_ops;
cfg.id = id;
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 72fb5838cae0..7975c6608e20 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1095,6 +1095,7 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
sizeof(state->tuner_i2c_adapter.name));
state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo;
state->tuner_i2c_adapter.algo_data = NULL;
+ state->tuner_i2c_adapter.dev.parent = i2c->dev.parent;
i2c_set_adapdata(&state->tuner_i2c_adapter, state);
if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) {
err("tuner i2c bus could not be initialized\n");
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index d4fa213ba74a..de88b980a837 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2325,7 +2325,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "HDCP keys read: %s%s\n",
(hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
(hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
- if (!is_hdmi(sd)) {
+ if (is_hdmi(sd)) {
bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
bool audio_mute = io_read(sd, 0x65) & 0x40;
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index e6588ee5bdb0..4cf8f18bf097 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -224,7 +224,7 @@ static inline unsigned int lpf_count_to_us(unsigned int count)
}
/*
- * FIFO register pulse width count compuations
+ * FIFO register pulse width count computations
*/
static u32 clock_divider_to_resolution(u16 divider)
{
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index 2c951dec2d33..c2ff5fc01157 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -263,7 +263,7 @@ static inline unsigned int lpf_count_to_us(unsigned int count)
}
/*
- * FIFO register pulse width count compuations
+ * FIFO register pulse width count computations
*/
static u32 clock_divider_to_resolution(u16 divider)
{
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 998919e97dfe..7b35e633118d 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
#include <linux/kthread.h>
diff --git a/drivers/media/rc/keymaps/Kconfig b/drivers/media/rc/keymaps/Kconfig
index 8e615fd55852..767423bbbdd0 100644
--- a/drivers/media/rc/keymaps/Kconfig
+++ b/drivers/media/rc/keymaps/Kconfig
@@ -12,4 +12,4 @@ config RC_MAP
The ir-keytable program, available in the v4l-utils package,
provides the tool and the same RC maps for loading from
userspace. It's available at
- http://git.linuxtv.org/v4l-utils
+ http://git.linuxtv.org/cgit.cgi/v4l-utils.git/
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a7e24848f6c8..9da812b8a786 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = {
.disconnect = em28xx_usb_disconnect,
.suspend = em28xx_usb_suspend,
.resume = em28xx_usb_resume,
+ .reset_resume = em28xx_usb_resume,
.id_table = em28xx_id_table,
};
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 90dec2955f1c..29abc379551e 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1342,7 +1342,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
- if (v4l2->streaming_users > 0)
+ if (vb2_is_busy(&v4l2->vb_vidq))
return -EBUSY;
vidioc_try_fmt_vid_cap(file, priv, f);
@@ -1883,8 +1883,9 @@ static int em28xx_v4l2_open(struct file *filp)
return -EINVAL;
}
- em28xx_videodbg("open dev=%s type=%s\n",
- video_device_node_name(vdev), v4l2_type_names[fh_type]);
+ em28xx_videodbg("open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[fh_type],
+ v4l2->users);
if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
@@ -1897,9 +1898,7 @@ static int em28xx_v4l2_open(struct file *filp)
return ret;
}
- if (v4l2_fh_is_singular_file(filp)) {
- em28xx_videodbg("first opened filehandle, initializing device\n");
-
+ if (v4l2->users == 0) {
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
if (vdev->vfl_type != VFL_TYPE_RADIO)
@@ -1910,8 +1909,6 @@ static int em28xx_v4l2_open(struct file *filp)
* of some i2c devices
*/
em28xx_wake_i2c(dev);
- } else {
- em28xx_videodbg("further filehandles are already opened\n");
}
if (vdev->vfl_type == VFL_TYPE_RADIO) {
@@ -1921,6 +1918,7 @@ static int em28xx_v4l2_open(struct file *filp)
kref_get(&dev->ref);
kref_get(&v4l2->ref);
+ v4l2->users++;
mutex_unlock(&dev->lock);
@@ -2027,11 +2025,12 @@ static int em28xx_v4l2_close(struct file *filp)
struct em28xx_v4l2 *v4l2 = dev->v4l2;
int errCode;
- mutex_lock(&dev->lock);
+ em28xx_videodbg("users=%d\n", v4l2->users);
- if (v4l2_fh_is_singular_file(filp)) {
- em28xx_videodbg("last opened filehandle, shutting down device\n");
+ vb2_fop_release(filp);
+ mutex_lock(&dev->lock);
+ if (v4l2->users == 1) {
/* No sense to try to write to the device */
if (dev->disconnected)
goto exit;
@@ -2050,12 +2049,10 @@ static int em28xx_v4l2_close(struct file *filp)
em28xx_errdev("cannot change alternate number to "
"0 (error=%i)\n", errCode);
}
- } else {
- em28xx_videodbg("further opened filehandles left\n");
}
exit:
- vb2_fop_release(filp);
+ v4l2->users--;
kref_put(&v4l2->ref, em28xx_free_v4l2);
mutex_unlock(&dev->lock);
kref_put(&dev->ref, em28xx_free_device);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 84ef8efdb148..4360338e7b31 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -524,6 +524,7 @@ struct em28xx_v4l2 {
int sensor_yres;
int sensor_xtal;
+ int users; /* user count for exclusive use */
int streaming_users; /* number of actively streaming users */
u32 frequency; /* selected tuner frequency */
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
index e60cbb3aa609..f86cec091bf4 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
@@ -259,7 +259,7 @@ static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
fine = val % VV6410_CIF_LINELENGTH;
coarse = min(512, val / VV6410_CIF_LINELENGTH);
- PDEBUG(D_CONF, "Set coarse exposure to %d, fine expsure to %d",
+ PDEBUG(D_CONF, "Set coarse exposure to %d, fine exposure to %d",
coarse, fine);
err = stv06xx_write_sensor(sd, VV6410_FINEH, fine >> 8);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 9ca0f8d59a14..ba7e21a73023 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -25,6 +25,15 @@ config VIDEO_FIXED_MINOR_RANGES
When in doubt, say N.
+config VIDEO_PCI_SKELETON
+ tristate "Skeleton PCI V4L2 driver"
+ depends on PCI && BUILD_DOCSRC
+ depends on VIDEO_V4L2 && VIDEOBUF2_CORE
+ depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
+ ---help---
+ Enable build of the skeleton PCI driver, used as a reference
+ when developing new drivers.
+
# Used by drivers that need tuner.ko
config VIDEO_TUNER
tristate
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index c359006074a8..25d3ae2188cb 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -971,6 +971,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* to the userspace.
*/
req->count = allocated_buffers;
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
return 0;
}
@@ -1018,6 +1019,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
q->memory = create->memory;
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
}
num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
@@ -1130,7 +1132,7 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
*/
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
@@ -1165,13 +1167,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
return;
- if (!q->start_streaming_called) {
- if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
- state = VB2_BUF_STATE_QUEUED;
- } else if (WARN_ON(state != VB2_BUF_STATE_DONE &&
- state != VB2_BUF_STATE_ERROR)) {
- state = VB2_BUF_STATE_ERROR;
- }
+ if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+ state != VB2_BUF_STATE_ERROR &&
+ state != VB2_BUF_STATE_QUEUED))
+ state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
@@ -1762,6 +1761,12 @@ static int vb2_start_streaming(struct vb2_queue *q)
q->start_streaming_called = 0;
dprintk(1, "driver refused to start streaming\n");
+ /*
+ * If you see this warning, then the driver isn't cleaning up properly
+ * after a failed start_streaming(). See the start_streaming()
+ * documentation in videobuf2-core.h for more information on how buffers
+ * should be returned to vb2 in start_streaming().
+ */
if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
unsigned i;
@@ -1777,6 +1782,12 @@ static int vb2_start_streaming(struct vb2_queue *q)
/* Must be zero now */
WARN_ON(atomic_read(&q->owned_by_drv_count));
}
+ /*
+ * If done_list is not empty, then start_streaming() didn't call
+ * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
+ * STATE_DONE.
+ */
+ WARN_ON(!list_empty(&q->done_list));
return ret;
}
@@ -1812,6 +1823,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
*/
list_add_tail(&vb->queued_entry, &q->queued_list);
q->queued_count++;
+ q->waiting_for_buffers = false;
vb->state = VB2_BUF_STATE_QUEUED;
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
/*
@@ -2123,6 +2135,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
if (q->start_streaming_called)
call_void_qop(q, stop_streaming, q);
+ /*
+ * If you see this warning, then the driver isn't cleaning up properly
+ * in stop_streaming(). See the stop_streaming() documentation in
+ * videobuf2-core.h for more information on how buffers should be returned
+ * to vb2 in stop_streaming().
+ */
if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
for (i = 0; i < q->num_buffers; ++i)
if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
@@ -2272,6 +2290,7 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
* their normal dequeued state.
*/
__vb2_queue_cancel(q);
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
dprintk(3, "successful\n");
return 0;
@@ -2590,10 +2609,17 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
}
/*
- * There is nothing to wait for if no buffer has been queued and the
- * queue isn't streaming, or if the error flag is set.
+ * There is nothing to wait for if the queue isn't streaming, or if the
+ * error flag is set.
+ */
+ if (!vb2_is_streaming(q) || q->error)
+ return res | POLLERR;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return POLLERR as well. This only affects capture queues; output
+ * queues will always initialize waiting_for_buffers to false.
*/
- if ((list_empty(&q->queued_list) && !vb2_is_streaming(q)) || q->error)
+ if (q->waiting_for_buffers)
return res | POLLERR;
/*
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index adefc31bb853..9b163a440f89 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -113,7 +113,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
goto fail_pages_alloc;
ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
- buf->num_pages, 0, size, gfp_flags);
+ buf->num_pages, 0, size, GFP_KERNEL);
if (ret)
goto fail_table_alloc;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a896d948b79e..187f83629f7e 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1400,7 +1400,6 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
* @vendor: pci vendor id
* @device: pci device id
* @revision: pci revision id
- * @prod_name: string returned
*
* Returns product string displayed when driver loads,
* in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
@@ -3172,12 +3171,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
}
- sz = facts->FWImageSize;
- if ( sz & 0x01 )
- sz += 1;
- if ( sz & 0x02 )
- sz += 2;
- facts->FWImageSize = sz;
+ facts->FWImageSize = ALIGN(facts->FWImageSize, 4);
if (!facts->RequestFrameSize) {
/* Something is wrong! */
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b0a892a2bf1b..70bb7530b22c 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1741,12 +1741,7 @@ mptctl_replace_fw (unsigned long arg)
/* Allocate memory for the new FW image
*/
- newFwSize = karg.newImageSize;
-
- if (newFwSize & 0x01)
- newFwSize += 1;
- if (newFwSize & 0x02)
- newFwSize += 2;
+ newFwSize = ALIGN(karg.newImageSize, 4);
mpt_alloc_fw_memory(ioc, newFwSize);
if (ioc->cached_fw == NULL)
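Both fusion hunks above replace the same open-coded rounding (add 1 if bit 0 is set, then 2 if bit 1 is set) with ALIGN(x, 4). A standalone sketch of the equivalence, using a local copy of the macro for power-of-two alignments (defined here only for the demo, not taken from the kernel headers):

#include <assert.h>

/* Same shape as the kernel's ALIGN() for power-of-two alignments. */
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* The rounding the patch removes: round up to the next multiple of 4. */
static unsigned int old_round4(unsigned int sz)
{
	if (sz & 0x01)
		sz += 1;
	if (sz & 0x02)
		sz += 2;
	return sz;
}

int main(void)
{
	unsigned int sz;

	for (sz = 0; sz < 64; sz++)
		assert(old_round4(sz) == DEMO_ALIGN(sz, 4u));
	return 0;
}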
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 787933d43d32..613231c16194 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1419,6 +1419,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_mptspi_probe;
}
+ /* VMWare emulation doesn't properly implement WRITE_SAME
+ */
+ if (pdev->subsystem_vendor == 0x15AD)
+ sh->no_write_same = 1;
+
spin_lock_irqsave(&ioc->FreeQlock, flags);
/* Attach the SCSI Host to the IOC structure
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a0e9422b55a2..959c313d84a7 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -424,7 +424,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
}
/**
- * mei_me_set_pm_domain - fill and set pm domian stucture for device
+ * mei_me_set_pm_domain - fill and set pm domain structure for device
*
* @dev: mei_device
*/
@@ -444,7 +444,7 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev)
}
/**
- * mei_me_unset_pm_domain - clean pm domian stucture for device
+ * mei_me_unset_pm_domain - clean pm domain structure for device
*
* @dev: mei_device
*/
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 19de57368b7a..74727dda51c1 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -371,7 +371,7 @@ static int mei_txe_pm_runtime_resume(struct device *device)
}
/**
- * mei_txe_set_pm_domain - fill and set pm domian stucture for device
+ * mei_txe_set_pm_domain - fill and set pm domain structure for device
*
* @dev: mei_device
*/
@@ -391,7 +391,7 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev)
}
/**
- * mei_txe_unset_pm_domain - clean pm domian stucture for device
+ * mei_txe_unset_pm_domain - clean pm domain structure for device
*
* @dev: mei_device
*/
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 5cd532ca1cfe..b01b0ce4d1be 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -36,7 +36,7 @@ static ssize_t state_show(struct slave *slave, char *buf)
case BOND_STATE_BACKUP:
return sprintf(buf, "backup\n");
default:
- return sprintf(buf, "UNKONWN\n");
+ return sprintf(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2fee73b878c2..823d01c5684c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
skb->protocol = eth_type_trans(skb, bp->dev);
- if ((len > (bp->dev->mtu + ETH_HLEN)) &&
- (ntohs(skb->protocol) != 0x8100)) {
+ if (len > (bp->dev->mtu + ETH_HLEN) &&
+ skb->protocol != htons(0x8100) &&
+ skb->protocol != htons(ETH_P_8021AD)) {
dev_kfree_skb(skb);
goto next_rx;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e7d3a620d96a..ba499489969a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
skb->protocol = eth_type_trans(skb, tp->dev);
if (len > (tp->dev->mtu + ETH_HLEN) &&
- skb->protocol != htons(ETH_P_8021Q)) {
+ skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)) {
dev_kfree_skb_any(skb);
goto drop_it_no_recycle;
}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca5d7798b265..e1e02fba4fcc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -30,7 +30,6 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/pinctrl/consumer.h>
#include "macb.h"
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev)
struct phy_device *phydev;
u32 config;
int err = -ENXIO;
- struct pinctrl *pinctrl;
const char *mac;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out;
}
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- err = PTR_ERR(pinctrl);
- if (err == -EPROBE_DEFER)
- goto err_out;
-
- dev_warn(&pdev->dev, "No pinctrl provided\n");
- }
-
err = -ENOMEM;
dev = alloc_etherdev(sizeof(*bp));
if (!dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 02a2e90d581a..923c4878461e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1695,7 +1695,7 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
if (err) {
vp_oper->vlan_idx = NO_INDX;
mlx4_warn(&priv->dev,
- "No vlan resorces slave %d, port %d\n",
+ "No vlan resources slave %d, port %d\n",
slave, port);
return err;
}
@@ -1711,7 +1711,7 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX;
mlx4_warn(&priv->dev,
- "No mac resorces slave %d, port %d\n",
+ "No mac resources slave %d, port %d\n",
slave, port);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7e2d5d57c598..871e3a5bda38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
-static int num_vfs_argc = 3;
+static int num_vfs_argc;
module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
"num_vfs=port1,port2,port1+2");
static uint8_t probe_vf[3] = {0, 0, 0};
-static int probe_vfs_argc = 3;
+static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
"probe_vf=port1,port2,port1+2");
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index db4280ce9c09..716fc37ada5a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -922,7 +922,7 @@ int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd)
rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {
- printk(KERN_ERR "%s: could not notify %s IP 0x%x reuqest\n",
+ printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n",
adapter->netdev->name,
(cmd == NX_IP_UP) ? "Add" : "Remove", ip);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 32058614151a..5c4068353f66 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
int i, j;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ spin_lock(&adapter->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
}
cmd_buf++;
}
+ spin_unlock(&adapter->tx_clean_lock);
}
void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
break;
}
- if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
+ tx_ring->sw_consumer = sw_consumer;
+ if (count && netif_running(netdev)) {
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 1159031f885b..5ec5a2b0e989 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
return;
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
netxen_napi_disable(adapter);
netxen_release_tx_buffers(adapter);
- spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 86783e1afcf7..3172cdf591fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
{
u32 idc_params, val;
- if (qlcnic_83xx_lockless_flash_read32(adapter,
- QLC_83XX_IDC_FLASH_PARAM_ADDR,
- (u8 *)&idc_params, 1)) {
+ if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
dev_info(&adapter->pdev->dev,
"%s:failed to get IDC params from flash\n", __func__);
adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 141f116eb868..494e8105adee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size, tx_size, ring;
+ int index, ret, length, size, ring;
char *p;
- tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+ memset(data, 0, stats->n_stats * sizeof(u64));
- memset(data, 0, tx_size * sizeof(u64));
for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
tx_ring = &adapter->tx_ring[ring];
data = qlcnic_fill_tx_queue_stats(data, tx_ring);
qlcnic_update_stats(adapter);
+ } else {
+ data += QLCNIC_TX_STATS_LEN;
}
}
- memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 851cb4a80d50..03cd4c3d7835 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -941,7 +941,7 @@ void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
dev_err(&adapter->netdev->dev,
- "could not notify %s IP 0x%x reuqest\n",
+ "could not notify %s IP 0x%x request\n",
(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6e6ee226de04..b0c1521e08a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2786,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
if (IS_ERR(priv->stmmac_clk)) {
dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
__func__);
- ret = PTR_ERR(priv->stmmac_clk);
- goto error_clk_get;
+ /* If we failed to obtain the stmmac_clk and no specific clk_csr
+ * value was passed from the platform, the probe fails.
+ */
+ if (!priv->plat->clk_csr) {
+ ret = PTR_ERR(priv->stmmac_clk);
+ goto error_clk_get;
+ } else {
+ priv->stmmac_clk = NULL;
+ }
}
clk_prepare_enable(priv->stmmac_clk);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index f5fbc12d3e10..a43e8492b1ce 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2056,7 +2056,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
- VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name);
+ VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
stats->rx_length_errors++;
return -EINVAL;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index a9c5eaadc426..0fcb5e7eb073 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
int hdr_offset;
u32 net_trans_info;
u32 hash;
+ u32 skb_length = skb->len;
/* We will need at most two pages to describe the rndis
@@ -562,7 +563,7 @@ do_send:
drop:
if (ret == 0) {
- net->stats.tx_bytes += skb->len;
+ net->stats.tx_bytes += skb_length;
net->stats.tx_packets++;
} else {
kfree(packet);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3381c4f91a8c..0c6adaaf898c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -112,17 +112,15 @@ out:
return err;
}
+/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
struct macvtap_queue *q)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- int err = -EBUSY;
- rtnl_lock();
if (vlan->numqueues == MAX_MACVTAP_QUEUES)
- goto out;
+ return -EBUSY;
- err = 0;
rcu_assign_pointer(q->vlan, vlan);
rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
sock_hold(&q->sk);
@@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
vlan->numvtaps++;
vlan->numqueues++;
-out:
- rtnl_unlock();
- return err;
+ return 0;
}
static int macvtap_disable_queue(struct macvtap_queue *q)
@@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk)
static int macvtap_open(struct inode *inode, struct file *file)
{
struct net *net = current->nsproxy->net_ns;
- struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
+ struct net_device *dev;
struct macvtap_queue *q;
- int err;
+ int err = -ENODEV;
- err = -ENODEV;
+ rtnl_lock();
+ dev = dev_get_by_macvtap_minor(iminor(inode));
if (!dev)
goto out;
@@ -498,6 +495,7 @@ out:
if (dev)
dev_put(dev);
+ rtnl_unlock();
return err;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 74760e8143e3..604ef210a4de 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,7 +24,7 @@
#include <net/ip6_checksum.h>
/* Version Information */
-#define DRIVER_VERSION "v1.06.0 (2014/03/03)"
+#define DRIVER_VERSION "v1.06.1 (2014/10/01)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -1949,10 +1949,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
}
+static int rtl_start_rx(struct r8152 *tp)
+{
+ int i, ret = 0;
+
+ INIT_LIST_HEAD(&tp->rx_done);
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl_stop_rx(struct r8152 *tp)
+{
+ int i;
+
+ for (i = 0; i < RTL8152_MAX_RX; i++)
+ usb_kill_urb(tp->rx_info[i].urb);
+
+ return 0;
+}
+
static int rtl_enable(struct r8152 *tp)
{
u32 ocp_data;
- int i, ret;
r8152b_reset_packet_filter(tp);
@@ -1962,14 +1986,7 @@ static int rtl_enable(struct r8152 *tp)
rxdy_gated_en(tp, false);
- INIT_LIST_HEAD(&tp->rx_done);
- ret = 0;
- for (i = 0; i < RTL8152_MAX_RX; i++) {
- INIT_LIST_HEAD(&tp->rx_info[i].list);
- ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
- }
-
- return ret;
+ return rtl_start_rx(tp);
}
static int rtl8152_enable(struct r8152 *tp)
@@ -2053,8 +2070,7 @@ static void rtl_disable(struct r8152 *tp)
mdelay(1);
}
- for (i = 0; i < RTL8152_MAX_RX; i++)
- usb_kill_urb(tp->rx_info[i].urb);
+ rtl_stop_rx(tp);
rtl8152_nic_reset(tp);
}
@@ -2185,28 +2201,6 @@ static void rtl_phy_reset(struct r8152 *tp)
}
}
-static void rtl_clear_bp(struct r8152 *tp)
-{
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
- mdelay(3);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
- ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
-}
-
-static void r8153_clear_bp(struct r8152 *tp)
-{
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
- ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
- rtl_clear_bp(tp);
-}
-
static void r8153_teredo_off(struct r8152 *tp)
{
u32 ocp_data;
@@ -2249,8 +2243,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
r8152_mdio_write(tp, MII_BMCR, data);
}
- rtl_clear_bp(tp);
-
set_bit(PHY_RESET, &tp->flags);
}
@@ -2401,8 +2393,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
r8152_mdio_write(tp, MII_BMCR, data);
}
- r8153_clear_bp(tp);
-
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
data &= ~CTAP_SHORT_EN;
@@ -3083,13 +3073,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ tasklet_disable(&tp->tl);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_stop_rx(tp);
rtl_runtime_suspend_enable(tp, true);
} else {
- tasklet_disable(&tp->tl);
tp->rtl_ops.down(tp);
- tasklet_enable(&tp->tl);
}
+ tasklet_enable(&tp->tl);
}
return 0;
@@ -3108,17 +3099,18 @@ static int rtl8152_resume(struct usb_interface *intf)
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ set_bit(WORK_ENABLE, &tp->flags);
if (tp->speed & LINK_STATUS)
- tp->rtl_ops.disable(tp);
+ rtl_start_rx(tp);
} else {
tp->rtl_ops.up(tp);
rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(tp->netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
}
- tp->speed = 0;
- netif_carrier_off(tp->netdev);
- set_bit(WORK_ENABLE, &tp->flags);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
}
@@ -3405,7 +3397,7 @@ static void rtl8153_unload(struct r8152 *tp)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
- r8153_power_cut_en(tp, true);
+ r8153_power_cut_en(tp, false);
}
static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -3558,7 +3550,11 @@ static void rtl8152_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (tp) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ struct usb_device *udev = tp->udev;
+
+ if (udev->state == USB_STATE_NOTATTACHED)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
tp->rtl_ops.unload(tp);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e6ac8d2e610c..4b148bbb2bf6 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* touch anything. Note this can happen early
* on if the IRQ is shared.
*/
- if (test_bit(ATH_OP_INVALID, &common->op_flags))
+ if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
/* shared irq, not for us */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index f3a9804988a6..16a246bfc343 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4921,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
struct brcmu_chan ch;
int i;
- for (i = 0; i <= total; i++) {
+ for (i = 0; i < total; i++) {
ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
cfg->d11inf.decchspec(&ch);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 47a998d8f99e..40ab7f0b7be0 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1353,7 +1353,7 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
wait_event_interruptible_timeout(priv->scan_q,
(priv->scan_req == NULL),
(15 * HZ));
- lbs_deb_assoc("assoc: scanning competed\n");
+ lbs_deb_assoc("assoc: scanning completed\n");
}
/* Find the BSS we want using available scan results */
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 088de9d25c39..25c5acc78bd1 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -332,7 +332,7 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
err_prog_firmware:
priv->hw_reset_device(card);
- lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programing fw; ret=%d", ret);
+ lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programming fw; ret=%d", ret);
return ret;
}
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index f868333271aa..963a4a5dc88e 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
targets->sens_res =
be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
- memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
- skb->data[MICROREAD_EMCF_A_LEN]);
targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
+ if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+ r = -EINVAL;
+ goto exit_free;
+ }
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
+ targets->nfcid1_len);
break;
case MICROREAD_GATE_ID_MREAD_ISO_A_3:
targets->supported_protocols =
@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
targets->sens_res =
be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
- memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
- skb->data[MICROREAD_EMCF_A3_LEN]);
targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
+ if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+ r = -EINVAL;
+ goto exit_free;
+ }
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
+ targets->nfcid1_len);
break;
case MICROREAD_GATE_ID_MREAD_ISO_B:
targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
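The microread change above reads the device-reported UID length first and rejects it with -EINVAL when it exceeds the nfcid1 buffer, instead of using it as an unchecked memcpy() length. Below is a minimal userspace sketch of that bounded-copy pattern; the struct layout and the 10-byte capacity are illustrative, not the driver's actual definitions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_target {
	uint8_t nfcid1[10];	/* illustrative capacity */
	uint8_t nfcid1_len;
};

static int copy_nfcid1(struct example_target *t, const uint8_t *uid, size_t len)
{
	if (len > sizeof(t->nfcid1))
		return -EINVAL;	/* reject oversized ids instead of overflowing */
	memcpy(t->nfcid1, uid, len);
	t->nfcid1_len = (uint8_t)len;
	return 0;
}

int main(void)
{
	struct example_target t;
	uint8_t uid[16] = { 0 };

	/* accepted (0), then rejected (-EINVAL, i.e. -22 on Linux) */
	printf("%d %d\n", copy_nfcid1(&t, uid, 7), copy_nfcid1(&t, uid, 16));
	return 0;
}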
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile
index db7a38ae05f7..7d688f97aa27 100644
--- a/drivers/nfc/st21nfca/Makefile
+++ b/drivers/nfc/st21nfca/Makefile
@@ -2,7 +2,8 @@
# Makefile for ST21NFCA HCI based NFC driver
#
-st21nfca_i2c-objs = i2c.o
+st21nfca_hci-objs = st21nfca.o st21nfca_dep.o
+obj-$(CONFIG_NFC_ST21NFCA) += st21nfca_hci.o
-obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o st21nfca_dep.o
+st21nfca_i2c-objs = i2c.o
obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o
diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile
index 13d9f03b2fea..f4d835dd15f2 100644
--- a/drivers/nfc/st21nfcb/Makefile
+++ b/drivers/nfc/st21nfcb/Makefile
@@ -2,7 +2,8 @@
# Makefile for ST21NFCB NCI based NFC driver
#
-st21nfcb_i2c-objs = i2c.o
+st21nfcb_nci-objs = ndlc.o st21nfcb.o
+obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb_nci.o
-obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb.o ndlc.o
+st21nfcb_i2c-objs = i2c.o
obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d8574adf0d62..293ed4b687ba 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -138,6 +138,9 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
/* Important: Don't leak passwords */
bool secure = strncmp(pp->name, "security-", 9) == 0;
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return 0;
+
if (!of_kset || !of_node_is_attached(np))
return 0;
@@ -158,6 +161,9 @@ int __of_attach_node_sysfs(struct device_node *np)
struct property *pp;
int rc;
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return 0;
+
if (!of_kset)
return 0;
@@ -1713,6 +1719,9 @@ int __of_remove_property(struct device_node *np, struct property *prop)
void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
{
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return;
+
/* at early boot, bail here and defer setup to of_init() */
if (of_kset && of_node_is_attached(np))
sysfs_remove_bin_file(&np->kobj, &prop->attr);
@@ -1777,6 +1786,9 @@ int __of_update_property(struct device_node *np, struct property *newprop,
void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
struct property *oldprop)
{
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return;
+
/* At early boot, bail out and defer setup to of_init() */
if (!of_kset)
return;
@@ -1847,6 +1859,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
struct property *pp;
+ of_aliases = of_find_node_by_path("/aliases");
of_chosen = of_find_node_by_path("/chosen");
if (of_chosen == NULL)
of_chosen = of_find_node_by_path("/chosen@0");
@@ -1862,7 +1875,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
of_stdout = of_find_node_by_path(name);
}
- of_aliases = of_find_node_by_path("/aliases");
if (!of_aliases)
return;
@@ -1986,7 +1998,7 @@ bool of_console_check(struct device_node *dn, char *name, int index)
{
if (!dn || dn != of_stdout || console_set_on_cmdline)
return false;
- return add_preferred_console(name, index, NULL);
+ return !add_preferred_console(name, index, NULL);
}
EXPORT_SYMBOL_GPL(of_console_check);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 54fecc49a1fe..f297891d8529 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -45,6 +45,9 @@ void __of_detach_node_sysfs(struct device_node *np)
{
struct property *pp;
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return;
+
BUG_ON(!of_node_is_initialized(np));
if (!of_kset)
return;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 79cb8313c7d8..d1ffca8b34ea 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -928,7 +928,11 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
const u64 phys_offset = __pa(PAGE_OFFSET);
- base &= PAGE_MASK;
+
+ if (!PAGE_ALIGNED(base)) {
+ size -= PAGE_SIZE - (base & ~PAGE_MASK);
+ base = PAGE_ALIGN(base);
+ }
size &= PAGE_MASK;
if (base > MAX_PHYS_ADDR) {
@@ -937,10 +941,10 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
return;
}
- if (base + size > MAX_PHYS_ADDR) {
- pr_warning("Ignoring memory range 0x%lx - 0x%llx\n",
- ULONG_MAX, base + size);
- size = MAX_PHYS_ADDR - base;
+ if (base + size - 1 > MAX_PHYS_ADDR) {
+ pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+ ((u64)MAX_PHYS_ADDR) + 1, base + size);
+ size = MAX_PHYS_ADDR - base + 1;
}
if (base + size < phys_offset) {
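The early_init_dt_add_memory_arch() hunk above stops silently masking an unaligned base down to a page boundary; it now rounds base up and shrinks size by the bytes skipped, so the registered region never starts before what the device tree described. A worked example with assumed 4 KiB pages and invented base/size values:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE		0x1000ULL
#define DEMO_PAGE_MASK		(~(DEMO_PAGE_SIZE - 1))
#define DEMO_PAGE_ALIGNED(x)	(((x) & (DEMO_PAGE_SIZE - 1)) == 0)
#define DEMO_PAGE_ALIGN(x)	(((x) + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK)

int main(void)
{
	uint64_t base = 0x1234, size = 0x10000;	/* invented values */

	if (!DEMO_PAGE_ALIGNED(base)) {
		size -= DEMO_PAGE_SIZE - (base & ~DEMO_PAGE_MASK);
		base = DEMO_PAGE_ALIGN(base);
	}
	size &= DEMO_PAGE_MASK;

	/* prints base=0x2000 size=0xf000 */
	printf("base=%#llx size=%#llx\n",
	       (unsigned long long)base, (unsigned long long)size);
	return 0;
}

With the removed "base &= PAGE_MASK" the same input would have become base=0x1000 with size=0x10000, handing out memory below the first byte the device tree actually described.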
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 0197725e033a..3b64d0bf5bba 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -160,11 +160,10 @@ EXPORT_SYMBOL(of_device_alloc);
* can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event
* to fix up DMA configuration.
*/
-static void of_dma_configure(struct platform_device *pdev)
+static void of_dma_configure(struct device *dev)
{
u64 dma_addr, paddr, size;
int ret;
- struct device *dev = &pdev->dev;
/*
* Set default dma-mask to 32 bit. Drivers are expected to setup
@@ -229,7 +228,7 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
goto err_clear_flag;
- of_dma_configure(dev);
+ of_dma_configure(&dev->dev);
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
@@ -291,7 +290,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
}
/* setup generic device info */
- dev->dev.coherent_dma_mask = ~0;
dev->dev.of_node = of_node_get(node);
dev->dev.parent = parent;
dev->dev.platform_data = platform_data;
@@ -299,6 +297,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
+ of_dma_configure(&dev->dev);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index a042d065a0c7..8be2096c8423 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -395,7 +395,8 @@ static void __init superio_serial_init(void)
serial_port.iotype = UPIO_PORT;
serial_port.type = PORT_16550A;
serial_port.uartclk = 115200*16;
- serial_port.fifosize = 16;
+ serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE |
+ UPF_BOOT_AUTOCONF;
/* serial port #1 */
serial_port.iobase = sio_dev.sp1_base;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 5e01ae39ec46..2a412fa3b338 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -160,7 +160,7 @@ static void pcie_wait_cmd(struct controller *ctrl)
ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
else
- rc = pcie_poll_cmd(ctrl, timeout);
+ rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
/*
* Controllers with errata like Intel CF118 don't generate
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e3cf8a2e6292..4170113cde61 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -775,7 +775,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
/* Check if setup is sensible at all */
if (!pass &&
(primary != bus->number || secondary <= bus->number ||
- secondary > subordinate || subordinate > bus->busn_res.end)) {
+ secondary > subordinate)) {
dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
secondary, subordinate);
broken = 1;
@@ -838,23 +838,18 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
goto out;
}
- if (max >= bus->busn_res.end) {
- dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
- max, &bus->busn_res);
- goto out;
- }
-
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
- /* The bus will already exist if we are rescanning */
+ /* Prevent assigning a bus number that already exists.
+ * This can happen when a bridge is hot-plugged, so in
+ * this case we only re-scan this bus. */
child = pci_find_bus(pci_domain_nr(bus), max+1);
if (!child) {
child = pci_add_new_bus(bus, dev, max+1);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max+1,
- bus->busn_res.end);
+ pci_bus_insert_busn_res(child, max+1, 0xff);
}
max++;
buses = (buses & 0xff000000)
@@ -913,11 +908,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
/*
* Set the subordinate bus number to its real value.
*/
- if (max > bus->busn_res.end) {
- dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
- max, &bus->busn_res);
- max = bus->busn_res.end;
- }
pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index bfd2c2e9f6cd..64d06b52f98a 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -97,95 +97,6 @@ config PINCTRL_BCM281XX
BCM28145, and BCM28155 SoCs. This driver requires the pinctrl
framework. GPIO is provided by a separate GPIO driver.
-config PINCTRL_IMX
- bool
- select PINMUX
- select PINCONF
-
-config PINCTRL_IMX1_CORE
- bool
- select PINMUX
- select PINCONF
-
-config PINCTRL_IMX1
- bool "IMX1 pinctrl driver"
- depends on SOC_IMX1
- select PINCTRL_IMX1_CORE
- help
- Say Y here to enable the imx1 pinctrl driver
-
-config PINCTRL_IMX27
- bool "IMX27 pinctrl driver"
- depends on SOC_IMX27
- select PINCTRL_IMX1_CORE
- help
- Say Y here to enable the imx27 pinctrl driver
-
-
-config PINCTRL_IMX25
- bool "IMX25 pinctrl driver"
- depends on OF
- depends on SOC_IMX25
- select PINCTRL_IMX
- help
- Say Y here to enable the imx25 pinctrl driver
-
-config PINCTRL_IMX35
- bool "IMX35 pinctrl driver"
- depends on SOC_IMX35
- select PINCTRL_IMX
- help
- Say Y here to enable the imx35 pinctrl driver
-
-config PINCTRL_IMX50
- bool "IMX50 pinctrl driver"
- depends on SOC_IMX50
- select PINCTRL_IMX
- help
- Say Y here to enable the imx50 pinctrl driver
-
-config PINCTRL_IMX51
- bool "IMX51 pinctrl driver"
- depends on SOC_IMX51
- select PINCTRL_IMX
- help
- Say Y here to enable the imx51 pinctrl driver
-
-config PINCTRL_IMX53
- bool "IMX53 pinctrl driver"
- depends on SOC_IMX53
- select PINCTRL_IMX
- help
- Say Y here to enable the imx53 pinctrl driver
-
-config PINCTRL_IMX6Q
- bool "IMX6Q/DL pinctrl driver"
- depends on SOC_IMX6Q
- select PINCTRL_IMX
- help
- Say Y here to enable the imx6q/dl pinctrl driver
-
-config PINCTRL_IMX6SL
- bool "IMX6SL pinctrl driver"
- depends on SOC_IMX6SL
- select PINCTRL_IMX
- help
- Say Y here to enable the imx6sl pinctrl driver
-
-config PINCTRL_IMX6SX
- bool "IMX6SX pinctrl driver"
- depends on SOC_IMX6SX
- select PINCTRL_IMX
- help
- Say Y here to enable the imx6sx pinctrl driver
-
-config PINCTRL_VF610
- bool "Freescale Vybrid VF610 pinctrl driver"
- depends on SOC_VF610
- select PINCTRL_IMX
- help
- Say Y here to enable the Freescale Vybrid VF610 pinctrl driver
-
config PINCTRL_LANTIQ
bool
depends on LANTIQ
@@ -197,19 +108,6 @@ config PINCTRL_FALCON
depends on SOC_FALCON
depends on PINCTRL_LANTIQ
-config PINCTRL_MXS
- bool
- select PINMUX
- select PINCONF
-
-config PINCTRL_IMX23
- bool
- select PINCTRL_MXS
-
-config PINCTRL_IMX28
- bool
- select PINCTRL_MXS
-
config PINCTRL_ROCKCHIP
bool
select PINMUX
@@ -306,6 +204,7 @@ config PINCTRL_PALMAS
TPS65913, TPS80036 etc.
source "drivers/pinctrl/berlin/Kconfig"
+source "drivers/pinctrl/freescale/Kconfig"
source "drivers/pinctrl/mvebu/Kconfig"
source "drivers/pinctrl/nomadik/Kconfig"
source "drivers/pinctrl/qcom/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 05d227508c95..51f52d32859e 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -17,23 +17,7 @@ obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
-obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
-obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
-obj-$(CONFIG_PINCTRL_IMX1) += pinctrl-imx1.o
-obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
-obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o
-obj-$(CONFIG_PINCTRL_IMX50) += pinctrl-imx50.o
-obj-$(CONFIG_PINCTRL_IMX51) += pinctrl-imx51.o
-obj-$(CONFIG_PINCTRL_IMX53) += pinctrl-imx53.o
-obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o
-obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6dl.o
-obj-$(CONFIG_PINCTRL_IMX6SL) += pinctrl-imx6sl.o
-obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
-obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
-obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
-obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o
-obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
@@ -52,15 +36,14 @@ obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
-obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_ARCH_BERLIN) += berlin/
+obj-y += freescale/
obj-$(CONFIG_PLAT_ORION) += mvebu/
obj-y += nomadik/
obj-$(CONFIG_ARCH_QCOM) += qcom/
-obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
-obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/
-obj-$(CONFIG_SUPERH) += sh-pfc/
+obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
+obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index 86db2235ab00..7f0b0f93242b 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -99,30 +99,11 @@ static int berlin_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrl_dev,
return 0;
}
-static void berlin_pinctrl_dt_free_map(struct pinctrl_dev *pctrl_dev,
- struct pinctrl_map *map,
- unsigned nmaps)
-{
- int i;
-
- for (i = 0; i < nmaps; i++) {
- if (map[i].type == PIN_MAP_TYPE_MUX_GROUP) {
- kfree(map[i].data.mux.group);
-
- /* a function can be applied to multiple groups */
- if (i == 0)
- kfree(map[i].data.mux.function);
- }
- }
-
- kfree(map);
-}
-
static const struct pinctrl_ops berlin_pinctrl_ops = {
.get_groups_count = &berlin_pinctrl_get_group_count,
.get_group_name = &berlin_pinctrl_get_group_name,
.dt_node_to_map = &berlin_pinctrl_dt_node_to_map,
- .dt_free_map = &berlin_pinctrl_dt_free_map,
+ .dt_free_map = &pinctrl_utils_dt_free_map,
};
static int berlin_pinmux_get_functions_count(struct pinctrl_dev *pctrl_dev)
@@ -170,9 +151,9 @@ berlin_pinctrl_find_function_by_name(struct berlin_pinctrl *pctrl,
return NULL;
}
-static int berlin_pinmux_enable(struct pinctrl_dev *pctrl_dev,
- unsigned function,
- unsigned group)
+static int berlin_pinmux_set(struct pinctrl_dev *pctrl_dev,
+ unsigned function,
+ unsigned group)
{
struct berlin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrl_dev);
const struct berlin_desc_group *group_desc = pctrl->desc->groups + group;
@@ -197,7 +178,7 @@ static const struct pinmux_ops berlin_pinmux_ops = {
.get_functions_count = &berlin_pinmux_get_functions_count,
.get_function_name = &berlin_pinmux_get_function_name,
.get_function_groups = &berlin_pinmux_get_function_groups,
- .enable = &berlin_pinmux_enable,
+ .set_mux = &berlin_pinmux_set,
};
static int berlin_pinctrl_add_function(struct berlin_pinctrl *pctrl,
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
new file mode 100644
index 000000000000..16aac38793fe
--- /dev/null
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -0,0 +1,108 @@
+config PINCTRL_IMX
+ bool
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_IMX1_CORE
+ bool
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_IMX1
+ bool "IMX1 pinctrl driver"
+ depends on SOC_IMX1
+ select PINCTRL_IMX1_CORE
+ help
+ Say Y here to enable the imx1 pinctrl driver
+
+config PINCTRL_IMX21
+ bool "i.MX21 pinctrl driver"
+ depends on SOC_IMX21
+ select PINCTRL_IMX1_CORE
+ help
+ Say Y here to enable the i.MX21 pinctrl driver
+
+config PINCTRL_IMX27
+ bool "IMX27 pinctrl driver"
+ depends on SOC_IMX27
+ select PINCTRL_IMX1_CORE
+ help
+ Say Y here to enable the imx27 pinctrl driver
+
+
+config PINCTRL_IMX25
+ bool "IMX25 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX25
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx25 pinctrl driver
+
+config PINCTRL_IMX35
+ bool "IMX35 pinctrl driver"
+ depends on SOC_IMX35
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx35 pinctrl driver
+
+config PINCTRL_IMX50
+ bool "IMX50 pinctrl driver"
+ depends on SOC_IMX50
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx50 pinctrl driver
+
+config PINCTRL_IMX51
+ bool "IMX51 pinctrl driver"
+ depends on SOC_IMX51
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx51 pinctrl driver
+
+config PINCTRL_IMX53
+ bool "IMX53 pinctrl driver"
+ depends on SOC_IMX53
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx53 pinctrl driver
+
+config PINCTRL_IMX6Q
+ bool "IMX6Q/DL pinctrl driver"
+ depends on SOC_IMX6Q
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx6q/dl pinctrl driver
+
+config PINCTRL_IMX6SL
+ bool "IMX6SL pinctrl driver"
+ depends on SOC_IMX6SL
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx6sl pinctrl driver
+
+config PINCTRL_IMX6SX
+ bool "IMX6SX pinctrl driver"
+ depends on SOC_IMX6SX
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx6sx pinctrl driver
+
+config PINCTRL_VF610
+ bool "Freescale Vybrid VF610 pinctrl driver"
+ depends on SOC_VF610
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the Freescale Vybrid VF610 pinctrl driver
+
+config PINCTRL_MXS
+ bool
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_IMX23
+ bool
+ select PINCTRL_MXS
+
+config PINCTRL_IMX28
+ bool
+ select PINCTRL_MXS
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
new file mode 100644
index 000000000000..bba73c22f043
--- /dev/null
+++ b/drivers/pinctrl/freescale/Makefile
@@ -0,0 +1,19 @@
+# Freescale pin control drivers
+obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
+obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
+obj-$(CONFIG_PINCTRL_IMX1) += pinctrl-imx1.o
+obj-$(CONFIG_PINCTRL_IMX21) += pinctrl-imx21.o
+obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
+obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o
+obj-$(CONFIG_PINCTRL_IMX50) += pinctrl-imx50.o
+obj-$(CONFIG_PINCTRL_IMX51) += pinctrl-imx51.o
+obj-$(CONFIG_PINCTRL_IMX53) += pinctrl-imx53.o
+obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o
+obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6dl.o
+obj-$(CONFIG_PINCTRL_IMX6SL) += pinctrl-imx6sl.o
+obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o
+obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
+obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
+obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
+obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o
+obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 946d594a64dd..f2446769247f 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -24,7 +24,7 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/slab.h>
-#include "core.h"
+#include "../core.h"
#include "pinctrl-imx.h"
/* The bits in CONFIG cell defined in binding doc*/
@@ -179,8 +179,8 @@ static const struct pinctrl_ops imx_pctrl_ops = {
};
-static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
@@ -204,7 +204,7 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
pin_id = pin->pin;
pin_reg = &info->pin_regs[pin_id];
- if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->mux_reg) {
+ if (pin_reg->mux_reg == -1) {
dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
info->pins[pin_id].name);
return -EINVAL;
@@ -298,7 +298,7 @@ static const struct pinmux_ops imx_pmx_ops = {
.get_functions_count = imx_pmx_get_funcs_count,
.get_function_name = imx_pmx_get_func_name,
.get_function_groups = imx_pmx_get_groups,
- .enable = imx_pmx_enable,
+ .set_mux = imx_pmx_set,
};
static int imx_pinconf_get(struct pinctrl_dev *pctldev,
@@ -308,7 +308,7 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev,
const struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
- if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->conf_reg) {
+ if (pin_reg->conf_reg == -1) {
dev_err(info->dev, "Pin(%s) does not support config function\n",
info->pins[pin_id].name);
return -EINVAL;
@@ -331,7 +331,7 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev,
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
int i;
- if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->conf_reg) {
+ if (pin_reg->conf_reg == -1) {
dev_err(info->dev, "Pin(%s) does not support config function\n",
info->pins[pin_id].name);
return -EINVAL;
@@ -586,10 +586,11 @@ int imx_pinctrl_probe(struct platform_device *pdev,
if (!ipctl)
return -ENOMEM;
- info->pin_regs = devm_kzalloc(&pdev->dev, sizeof(*info->pin_regs) *
+ info->pin_regs = devm_kmalloc(&pdev->dev, sizeof(*info->pin_regs) *
info->npins, GFP_KERNEL);
if (!info->pin_regs)
return -ENOMEM;
+ memset(info->pin_regs, 0xff, sizeof(*info->pin_regs) * info->npins);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ipctl->base = devm_ioremap_resource(&pdev->dev, res);
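A self-contained sketch of the sentinel scheme introduced above, assuming nothing beyond what the hunks show: the register offsets become signed 16-bit fields, memset() with 0xff makes each of them read back as -1 on two's-complement targets, and offset 0 becomes a perfectly valid register address instead of meaning "absent":

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct pin_reg {
	int16_t mux_reg;	/* mirrors the s16 fields in pinctrl-imx.h */
	int16_t conf_reg;
};

int main(void)
{
	struct pin_reg regs[4];

	/* every byte set to 0xff -> every s16 field reads as -1 */
	memset(regs, 0xff, sizeof(regs));

	regs[2].mux_reg = 0;	/* offset 0 is now a legal mux register */

	for (int i = 0; i < 4; i++)
		printf("pin %d: mux_reg=%d%s\n", i, regs[i].mux_reg,
		       regs[i].mux_reg == -1 ? " (no mux function)" : "");
	return 0;
}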
diff --git a/drivers/pinctrl/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index db408b057000..49e55d39f7c8 100644
--- a/drivers/pinctrl/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -67,8 +67,8 @@ struct imx_pmx_func {
* @conf_reg: config register offset
*/
struct imx_pin_reg {
- u16 mux_reg;
- u16 conf_reg;
+ s16 mux_reg;
+ s16 conf_reg;
};
struct imx_pinctrl_soc_info {
@@ -83,8 +83,7 @@ struct imx_pinctrl_soc_info {
unsigned int flags;
};
-#define ZERO_OFFSET_VALID 0x1
-#define SHARE_MUX_CONF_REG 0x2
+#define SHARE_MUX_CONF_REG 0x1
#define NO_MUX 0x0
#define NO_PAD 0x0
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 483420757c9f..5ac59fbb2440 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -28,7 +28,7 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/slab.h>
-#include "core.h"
+#include "../core.h"
#include "pinctrl-imx1.h"
struct imx1_pinctrl {
@@ -298,8 +298,8 @@ static const struct pinctrl_ops imx1_pctrl_ops = {
};
-static int imx1_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int imx1_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx1_pinctrl_soc_info *info = ipctl->info;
@@ -385,7 +385,7 @@ static const struct pinmux_ops imx1_pmx_ops = {
.get_functions_count = imx1_pmx_get_funcs_count,
.get_function_name = imx1_pmx_get_func_name,
.get_function_groups = imx1_pmx_get_groups,
- .enable = imx1_pmx_enable,
+ .set_mux = imx1_pmx_set,
};
static int imx1_pinconf_get(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index 533a6e519648..533a6e519648 100644
--- a/drivers/pinctrl/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
diff --git a/drivers/pinctrl/pinctrl-imx1.h b/drivers/pinctrl/freescale/pinctrl-imx1.h
index 692a54c15cda..692a54c15cda 100644
--- a/drivers/pinctrl/pinctrl-imx1.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.h
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
new file mode 100644
index 000000000000..1b3b2311b033
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx21.c
@@ -0,0 +1,342 @@
+/*
+ * i.MX21 pinctrl driver based on imx pinmux core
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx1.h"
+
+#define PAD_ID(port, pin) ((port) * 32 + (pin))
+#define PA 0
+#define PB 1
+#define PC 2
+#define PD 3
+#define PE 4
+#define PF 5
+
+enum imx21_pads {
+ MX21_PAD_LSCLK = PAD_ID(PA, 5),
+ MX21_PAD_LD0 = PAD_ID(PA, 6),
+ MX21_PAD_LD1 = PAD_ID(PA, 7),
+ MX21_PAD_LD2 = PAD_ID(PA, 8),
+ MX21_PAD_LD3 = PAD_ID(PA, 9),
+ MX21_PAD_LD4 = PAD_ID(PA, 10),
+ MX21_PAD_LD5 = PAD_ID(PA, 11),
+ MX21_PAD_LD6 = PAD_ID(PA, 12),
+ MX21_PAD_LD7 = PAD_ID(PA, 13),
+ MX21_PAD_LD8 = PAD_ID(PA, 14),
+ MX21_PAD_LD9 = PAD_ID(PA, 15),
+ MX21_PAD_LD10 = PAD_ID(PA, 16),
+ MX21_PAD_LD11 = PAD_ID(PA, 17),
+ MX21_PAD_LD12 = PAD_ID(PA, 18),
+ MX21_PAD_LD13 = PAD_ID(PA, 19),
+ MX21_PAD_LD14 = PAD_ID(PA, 20),
+ MX21_PAD_LD15 = PAD_ID(PA, 21),
+ MX21_PAD_LD16 = PAD_ID(PA, 22),
+ MX21_PAD_LD17 = PAD_ID(PA, 23),
+ MX21_PAD_REV = PAD_ID(PA, 24),
+ MX21_PAD_CLS = PAD_ID(PA, 25),
+ MX21_PAD_PS = PAD_ID(PA, 26),
+ MX21_PAD_SPL_SPR = PAD_ID(PA, 27),
+ MX21_PAD_HSYNC = PAD_ID(PA, 28),
+ MX21_PAD_VSYNC = PAD_ID(PA, 29),
+ MX21_PAD_CONTRAST = PAD_ID(PA, 30),
+ MX21_PAD_OE_ACD = PAD_ID(PA, 31),
+ MX21_PAD_SD2_D0 = PAD_ID(PB, 4),
+ MX21_PAD_SD2_D1 = PAD_ID(PB, 5),
+ MX21_PAD_SD2_D2 = PAD_ID(PB, 6),
+ MX21_PAD_SD2_D3 = PAD_ID(PB, 7),
+ MX21_PAD_SD2_CMD = PAD_ID(PB, 8),
+ MX21_PAD_SD2_CLK = PAD_ID(PB, 9),
+ MX21_PAD_CSI_D0 = PAD_ID(PB, 10),
+ MX21_PAD_CSI_D1 = PAD_ID(PB, 11),
+ MX21_PAD_CSI_D2 = PAD_ID(PB, 12),
+ MX21_PAD_CSI_D3 = PAD_ID(PB, 13),
+ MX21_PAD_CSI_D4 = PAD_ID(PB, 14),
+ MX21_PAD_CSI_MCLK = PAD_ID(PB, 15),
+ MX21_PAD_CSI_PIXCLK = PAD_ID(PB, 16),
+ MX21_PAD_CSI_D5 = PAD_ID(PB, 17),
+ MX21_PAD_CSI_D6 = PAD_ID(PB, 18),
+ MX21_PAD_CSI_D7 = PAD_ID(PB, 19),
+ MX21_PAD_CSI_VSYNC = PAD_ID(PB, 20),
+ MX21_PAD_CSI_HSYNC = PAD_ID(PB, 21),
+ MX21_PAD_USB_BYP = PAD_ID(PB, 22),
+ MX21_PAD_USB_PWR = PAD_ID(PB, 23),
+ MX21_PAD_USB_OC = PAD_ID(PB, 24),
+ MX21_PAD_USBH_ON = PAD_ID(PB, 25),
+ MX21_PAD_USBH1_FS = PAD_ID(PB, 26),
+ MX21_PAD_USBH1_OE = PAD_ID(PB, 27),
+ MX21_PAD_USBH1_TXDM = PAD_ID(PB, 28),
+ MX21_PAD_USBH1_TXDP = PAD_ID(PB, 29),
+ MX21_PAD_USBH1_RXDM = PAD_ID(PB, 30),
+ MX21_PAD_USBH1_RXDP = PAD_ID(PB, 31),
+ MX21_PAD_USBG_SDA = PAD_ID(PC, 5),
+ MX21_PAD_USBG_SCL = PAD_ID(PC, 6),
+ MX21_PAD_USBG_ON = PAD_ID(PC, 7),
+ MX21_PAD_USBG_FS = PAD_ID(PC, 8),
+ MX21_PAD_USBG_OE = PAD_ID(PC, 9),
+ MX21_PAD_USBG_TXDM = PAD_ID(PC, 10),
+ MX21_PAD_USBG_TXDP = PAD_ID(PC, 11),
+ MX21_PAD_USBG_RXDM = PAD_ID(PC, 12),
+ MX21_PAD_USBG_RXDP = PAD_ID(PC, 13),
+ MX21_PAD_TOUT = PAD_ID(PC, 14),
+ MX21_PAD_TIN = PAD_ID(PC, 15),
+ MX21_PAD_SAP_FS = PAD_ID(PC, 16),
+ MX21_PAD_SAP_RXD = PAD_ID(PC, 17),
+ MX21_PAD_SAP_TXD = PAD_ID(PC, 18),
+ MX21_PAD_SAP_CLK = PAD_ID(PC, 19),
+ MX21_PAD_SSI1_FS = PAD_ID(PC, 20),
+ MX21_PAD_SSI1_RXD = PAD_ID(PC, 21),
+ MX21_PAD_SSI1_TXD = PAD_ID(PC, 22),
+ MX21_PAD_SSI1_CLK = PAD_ID(PC, 23),
+ MX21_PAD_SSI2_FS = PAD_ID(PC, 24),
+ MX21_PAD_SSI2_RXD = PAD_ID(PC, 25),
+ MX21_PAD_SSI2_TXD = PAD_ID(PC, 26),
+ MX21_PAD_SSI2_CLK = PAD_ID(PC, 27),
+ MX21_PAD_SSI3_FS = PAD_ID(PC, 28),
+ MX21_PAD_SSI3_RXD = PAD_ID(PC, 29),
+ MX21_PAD_SSI3_TXD = PAD_ID(PC, 30),
+ MX21_PAD_SSI3_CLK = PAD_ID(PC, 31),
+ MX21_PAD_I2C_DATA = PAD_ID(PD, 17),
+ MX21_PAD_I2C_CLK = PAD_ID(PD, 18),
+ MX21_PAD_CSPI2_SS2 = PAD_ID(PD, 19),
+ MX21_PAD_CSPI2_SS1 = PAD_ID(PD, 20),
+ MX21_PAD_CSPI2_SS0 = PAD_ID(PD, 21),
+ MX21_PAD_CSPI2_SCLK = PAD_ID(PD, 22),
+ MX21_PAD_CSPI2_MISO = PAD_ID(PD, 23),
+ MX21_PAD_CSPI2_MOSI = PAD_ID(PD, 24),
+ MX21_PAD_CSPI1_RDY = PAD_ID(PD, 25),
+ MX21_PAD_CSPI1_SS2 = PAD_ID(PD, 26),
+ MX21_PAD_CSPI1_SS1 = PAD_ID(PD, 27),
+ MX21_PAD_CSPI1_SS0 = PAD_ID(PD, 28),
+ MX21_PAD_CSPI1_SCLK = PAD_ID(PD, 29),
+ MX21_PAD_CSPI1_MISO = PAD_ID(PD, 30),
+ MX21_PAD_CSPI1_MOSI = PAD_ID(PD, 31),
+ MX21_PAD_TEST_WB2 = PAD_ID(PE, 0),
+ MX21_PAD_TEST_WB1 = PAD_ID(PE, 1),
+ MX21_PAD_TEST_WB0 = PAD_ID(PE, 2),
+ MX21_PAD_UART2_CTS = PAD_ID(PE, 3),
+ MX21_PAD_UART2_RTS = PAD_ID(PE, 4),
+ MX21_PAD_PWMO = PAD_ID(PE, 5),
+ MX21_PAD_UART2_TXD = PAD_ID(PE, 6),
+ MX21_PAD_UART2_RXD = PAD_ID(PE, 7),
+ MX21_PAD_UART3_TXD = PAD_ID(PE, 8),
+ MX21_PAD_UART3_RXD = PAD_ID(PE, 9),
+ MX21_PAD_UART3_CTS = PAD_ID(PE, 10),
+ MX21_PAD_UART3_RTS = PAD_ID(PE, 11),
+ MX21_PAD_UART1_TXD = PAD_ID(PE, 12),
+ MX21_PAD_UART1_RXD = PAD_ID(PE, 13),
+ MX21_PAD_UART1_CTS = PAD_ID(PE, 14),
+ MX21_PAD_UART1_RTS = PAD_ID(PE, 15),
+ MX21_PAD_RTCK = PAD_ID(PE, 16),
+ MX21_PAD_RESET_OUT = PAD_ID(PE, 17),
+ MX21_PAD_SD1_D0 = PAD_ID(PE, 18),
+ MX21_PAD_SD1_D1 = PAD_ID(PE, 19),
+ MX21_PAD_SD1_D2 = PAD_ID(PE, 20),
+ MX21_PAD_SD1_D3 = PAD_ID(PE, 21),
+ MX21_PAD_SD1_CMD = PAD_ID(PE, 22),
+ MX21_PAD_SD1_CLK = PAD_ID(PE, 23),
+ MX21_PAD_NFRB = PAD_ID(PF, 0),
+ MX21_PAD_NFCE = PAD_ID(PF, 1),
+ MX21_PAD_NFWP = PAD_ID(PF, 2),
+ MX21_PAD_NFCLE = PAD_ID(PF, 3),
+ MX21_PAD_NFALE = PAD_ID(PF, 4),
+ MX21_PAD_NFRE = PAD_ID(PF, 5),
+ MX21_PAD_NFWE = PAD_ID(PF, 6),
+ MX21_PAD_NFIO0 = PAD_ID(PF, 7),
+ MX21_PAD_NFIO1 = PAD_ID(PF, 8),
+ MX21_PAD_NFIO2 = PAD_ID(PF, 9),
+ MX21_PAD_NFIO3 = PAD_ID(PF, 10),
+ MX21_PAD_NFIO4 = PAD_ID(PF, 11),
+ MX21_PAD_NFIO5 = PAD_ID(PF, 12),
+ MX21_PAD_NFIO6 = PAD_ID(PF, 13),
+ MX21_PAD_NFIO7 = PAD_ID(PF, 14),
+ MX21_PAD_CLKO = PAD_ID(PF, 15),
+ MX21_PAD_RESERVED = PAD_ID(PF, 16),
+ MX21_PAD_CS4 = PAD_ID(PF, 21),
+ MX21_PAD_CS5 = PAD_ID(PF, 22),
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx21_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX21_PAD_LSCLK),
+ IMX_PINCTRL_PIN(MX21_PAD_LD0),
+ IMX_PINCTRL_PIN(MX21_PAD_LD1),
+ IMX_PINCTRL_PIN(MX21_PAD_LD2),
+ IMX_PINCTRL_PIN(MX21_PAD_LD3),
+ IMX_PINCTRL_PIN(MX21_PAD_LD4),
+ IMX_PINCTRL_PIN(MX21_PAD_LD5),
+ IMX_PINCTRL_PIN(MX21_PAD_LD6),
+ IMX_PINCTRL_PIN(MX21_PAD_LD7),
+ IMX_PINCTRL_PIN(MX21_PAD_LD8),
+ IMX_PINCTRL_PIN(MX21_PAD_LD9),
+ IMX_PINCTRL_PIN(MX21_PAD_LD10),
+ IMX_PINCTRL_PIN(MX21_PAD_LD11),
+ IMX_PINCTRL_PIN(MX21_PAD_LD12),
+ IMX_PINCTRL_PIN(MX21_PAD_LD13),
+ IMX_PINCTRL_PIN(MX21_PAD_LD14),
+ IMX_PINCTRL_PIN(MX21_PAD_LD15),
+ IMX_PINCTRL_PIN(MX21_PAD_LD16),
+ IMX_PINCTRL_PIN(MX21_PAD_LD17),
+ IMX_PINCTRL_PIN(MX21_PAD_REV),
+ IMX_PINCTRL_PIN(MX21_PAD_CLS),
+ IMX_PINCTRL_PIN(MX21_PAD_PS),
+ IMX_PINCTRL_PIN(MX21_PAD_SPL_SPR),
+ IMX_PINCTRL_PIN(MX21_PAD_HSYNC),
+ IMX_PINCTRL_PIN(MX21_PAD_VSYNC),
+ IMX_PINCTRL_PIN(MX21_PAD_CONTRAST),
+ IMX_PINCTRL_PIN(MX21_PAD_OE_ACD),
+ IMX_PINCTRL_PIN(MX21_PAD_SD2_D0),
+ IMX_PINCTRL_PIN(MX21_PAD_SD2_D1),
+ IMX_PINCTRL_PIN(MX21_PAD_SD2_D2),
+ IMX_PINCTRL_PIN(MX21_PAD_SD2_D3),
+ IMX_PINCTRL_PIN(MX21_PAD_SD2_CMD),
+ IMX_PINCTRL_PIN(MX21_PAD_SD2_CLK),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D0),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D1),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D2),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D3),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D4),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_MCLK),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_PIXCLK),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D5),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D6),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_D7),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_VSYNC),
+ IMX_PINCTRL_PIN(MX21_PAD_CSI_HSYNC),
+ IMX_PINCTRL_PIN(MX21_PAD_USB_BYP),
+ IMX_PINCTRL_PIN(MX21_PAD_USB_PWR),
+ IMX_PINCTRL_PIN(MX21_PAD_USB_OC),
+ IMX_PINCTRL_PIN(MX21_PAD_USBH_ON),
+ IMX_PINCTRL_PIN(MX21_PAD_USBH1_FS),
+ IMX_PINCTRL_PIN(MX21_PAD_USBH1_OE),
+ IMX_PINCTRL_PIN(MX21_PAD_USBH1_TXDM),
+ IMX_PINCTRL_PIN(MX21_PAD_USBH1_TXDP),
+ IMX_PINCTRL_PIN(MX21_PAD_USBH1_RXDM),
+ IMX_PINCTRL_PIN(MX21_PAD_USBH1_RXDP),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_SDA),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_SCL),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_ON),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_FS),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_OE),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_TXDM),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_TXDP),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_RXDM),
+ IMX_PINCTRL_PIN(MX21_PAD_USBG_RXDP),
+ IMX_PINCTRL_PIN(MX21_PAD_TOUT),
+ IMX_PINCTRL_PIN(MX21_PAD_TIN),
+ IMX_PINCTRL_PIN(MX21_PAD_SAP_FS),
+ IMX_PINCTRL_PIN(MX21_PAD_SAP_RXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SAP_TXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SAP_CLK),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI1_FS),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI1_RXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI1_TXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI1_CLK),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI2_FS),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI2_RXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI2_TXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI2_CLK),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI3_FS),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI3_RXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI3_TXD),
+ IMX_PINCTRL_PIN(MX21_PAD_SSI3_CLK),
+ IMX_PINCTRL_PIN(MX21_PAD_I2C_DATA),
+ IMX_PINCTRL_PIN(MX21_PAD_I2C_CLK),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS2),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS1),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS0),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI2_MISO),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI1_RDY),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS2),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS1),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS0),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI1_MISO),
+ IMX_PINCTRL_PIN(MX21_PAD_CSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX21_PAD_TEST_WB2),
+ IMX_PINCTRL_PIN(MX21_PAD_TEST_WB1),
+ IMX_PINCTRL_PIN(MX21_PAD_TEST_WB0),
+ IMX_PINCTRL_PIN(MX21_PAD_UART2_CTS),
+ IMX_PINCTRL_PIN(MX21_PAD_UART2_RTS),
+ IMX_PINCTRL_PIN(MX21_PAD_PWMO),
+ IMX_PINCTRL_PIN(MX21_PAD_UART2_TXD),
+ IMX_PINCTRL_PIN(MX21_PAD_UART2_RXD),
+ IMX_PINCTRL_PIN(MX21_PAD_UART3_TXD),
+ IMX_PINCTRL_PIN(MX21_PAD_UART3_RXD),
+ IMX_PINCTRL_PIN(MX21_PAD_UART3_CTS),
+ IMX_PINCTRL_PIN(MX21_PAD_UART3_RTS),
+ IMX_PINCTRL_PIN(MX21_PAD_UART1_TXD),
+ IMX_PINCTRL_PIN(MX21_PAD_UART1_RXD),
+ IMX_PINCTRL_PIN(MX21_PAD_UART1_CTS),
+ IMX_PINCTRL_PIN(MX21_PAD_UART1_RTS),
+ IMX_PINCTRL_PIN(MX21_PAD_RTCK),
+ IMX_PINCTRL_PIN(MX21_PAD_RESET_OUT),
+ IMX_PINCTRL_PIN(MX21_PAD_SD1_D0),
+ IMX_PINCTRL_PIN(MX21_PAD_SD1_D1),
+ IMX_PINCTRL_PIN(MX21_PAD_SD1_D2),
+ IMX_PINCTRL_PIN(MX21_PAD_SD1_D3),
+ IMX_PINCTRL_PIN(MX21_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(MX21_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(MX21_PAD_NFRB),
+ IMX_PINCTRL_PIN(MX21_PAD_NFCE),
+ IMX_PINCTRL_PIN(MX21_PAD_NFWP),
+ IMX_PINCTRL_PIN(MX21_PAD_NFCLE),
+ IMX_PINCTRL_PIN(MX21_PAD_NFALE),
+ IMX_PINCTRL_PIN(MX21_PAD_NFRE),
+ IMX_PINCTRL_PIN(MX21_PAD_NFWE),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO0),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO1),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO2),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO3),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO4),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO5),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO6),
+ IMX_PINCTRL_PIN(MX21_PAD_NFIO7),
+ IMX_PINCTRL_PIN(MX21_PAD_CLKO),
+ IMX_PINCTRL_PIN(MX21_PAD_RESERVED),
+ IMX_PINCTRL_PIN(MX21_PAD_CS4),
+ IMX_PINCTRL_PIN(MX21_PAD_CS5),
+};
+
+static struct imx1_pinctrl_soc_info imx21_pinctrl_info = {
+ .pins = imx21_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx21_pinctrl_pads),
+};
+
+static int __init imx21_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx1_pinctrl_core_probe(pdev, &imx21_pinctrl_info);
+}
+
+static const struct of_device_id imx21_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx21-iomuxc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx21_pinctrl_of_match);
+
+static struct platform_driver imx21_pinctrl_driver = {
+ .driver = {
+ .name = "imx21-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = imx21_pinctrl_of_match,
+ },
+ .remove = imx1_pinctrl_core_remove,
+};
+module_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Freescale i.MX21 pinctrl driver");
+MODULE_LICENSE("GPL");
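The PAD_ID() macro at the top of the new imx21 driver packs a port letter and a pin index into one flat pad number, 32 pads per port. A short worked example of that arithmetic (a standalone sketch, not part of the driver):

#include <stdio.h>

#define PAD_ID(port, pin)	((port) * 32 + (pin))
#define PA 0
#define PB 1

int main(void)
{
	/* MX21_PAD_LSCLK  = PAD_ID(PA, 5) = 0 * 32 + 5 = 5  */
	/* MX21_PAD_SD2_D0 = PAD_ID(PB, 4) = 1 * 32 + 4 = 36 */
	printf("LSCLK  = %d\n", PAD_ID(PA, 5));
	printf("SD2_D0 = %d\n", PAD_ID(PB, 4));
	return 0;
}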
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/freescale/pinctrl-imx23.c
index e76d75c9d1ba..df79096becb0 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx23.c
@@ -272,7 +272,7 @@ static int imx23_pinctrl_probe(struct platform_device *pdev)
return mxs_pinctrl_probe(pdev, &imx23_pinctrl_data);
}
-static struct of_device_id imx23_pinctrl_of_match[] = {
+static const struct of_device_id imx23_pinctrl_of_match[] = {
{ .compatible = "fsl,imx23-pinctrl", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index 1aae1b61c4dc..550e6d77ac2b 100644
--- a/drivers/pinctrl/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -315,7 +315,7 @@ static struct imx_pinctrl_soc_info imx25_pinctrl_info = {
.npins = ARRAY_SIZE(imx25_pinctrl_pads),
};
-static struct of_device_id imx25_pinctrl_of_match[] = {
+static const struct of_device_id imx25_pinctrl_of_match[] = {
{ .compatible = "fsl,imx25-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index f8dfefb69968..945eccadea74 100644
--- a/drivers/pinctrl/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -389,7 +389,7 @@ static struct imx1_pinctrl_soc_info imx27_pinctrl_info = {
.npins = ARRAY_SIZE(imx27_pinctrl_pads),
};
-static struct of_device_id imx27_pinctrl_of_match[] = {
+static const struct of_device_id imx27_pinctrl_of_match[] = {
{ .compatible = "fsl,imx27-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/freescale/pinctrl-imx28.c
index 79c9c8d296af..3bd45da21229 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx28.c
@@ -388,7 +388,7 @@ static int imx28_pinctrl_probe(struct platform_device *pdev)
return mxs_pinctrl_probe(pdev, &imx28_pinctrl_data);
}
-static struct of_device_id imx28_pinctrl_of_match[] = {
+static const struct of_device_id imx28_pinctrl_of_match[] = {
{ .compatible = "fsl,imx28-pinctrl", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 278a04ae8940..6bfbcd0112c1 100644
--- a/drivers/pinctrl/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1005,7 +1005,7 @@ static struct imx_pinctrl_soc_info imx35_pinctrl_info = {
.npins = ARRAY_SIZE(imx35_pinctrl_pads),
};
-static struct of_device_id imx35_pinctrl_of_match[] = {
+static const struct of_device_id imx35_pinctrl_of_match[] = {
{ .compatible = "fsl,imx35-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index b06feed1b038..e8bd604ab147 100644
--- a/drivers/pinctrl/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -391,7 +391,7 @@ static struct imx_pinctrl_soc_info imx50_pinctrl_info = {
.npins = ARRAY_SIZE(imx50_pinctrl_pads),
};
-static struct of_device_id imx50_pinctrl_of_match[] = {
+static const struct of_device_id imx50_pinctrl_of_match[] = {
{ .compatible = "fsl,imx50-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index 19ab182bef61..b818051db7c9 100644
--- a/drivers/pinctrl/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -768,7 +768,7 @@ static struct imx_pinctrl_soc_info imx51_pinctrl_info = {
.npins = ARRAY_SIZE(imx51_pinctrl_pads),
};
-static struct of_device_id imx51_pinctrl_of_match[] = {
+static const struct of_device_id imx51_pinctrl_of_match[] = {
{ .compatible = "fsl,imx51-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index f8d45c4cfde7..1884d53cf750 100644
--- a/drivers/pinctrl/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -454,7 +454,7 @@ static struct imx_pinctrl_soc_info imx53_pinctrl_info = {
.npins = ARRAY_SIZE(imx53_pinctrl_pads),
};
-static struct of_device_id imx53_pinctrl_of_match[] = {
+static const struct of_device_id imx53_pinctrl_of_match[] = {
{ .compatible = "fsl,imx53-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index db2a1489bd99..656c4b08cc2e 100644
--- a/drivers/pinctrl/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -460,7 +460,7 @@ static struct imx_pinctrl_soc_info imx6dl_pinctrl_info = {
.npins = ARRAY_SIZE(imx6dl_pinctrl_pads),
};
-static struct of_device_id imx6dl_pinctrl_of_match[] = {
+static const struct of_device_id imx6dl_pinctrl_of_match[] = {
{ .compatible = "fsl,imx6dl-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 8eb5ac1bd5f6..59bb5b4ec0f6 100644
--- a/drivers/pinctrl/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -466,7 +466,7 @@ static struct imx_pinctrl_soc_info imx6q_pinctrl_info = {
.npins = ARRAY_SIZE(imx6q_pinctrl_pads),
};
-static struct of_device_id imx6q_pinctrl_of_match[] = {
+static const struct of_device_id imx6q_pinctrl_of_match[] = {
{ .compatible = "fsl,imx6q-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index f21b7389df3c..e0924bd7b98c 100644
--- a/drivers/pinctrl/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -366,10 +366,11 @@ static struct imx_pinctrl_soc_info imx6sl_pinctrl_info = {
.npins = ARRAY_SIZE(imx6sl_pinctrl_pads),
};
-static struct of_device_id imx6sl_pinctrl_of_match[] = {
+static const struct of_device_id imx6sl_pinctrl_of_match[] = {
{ .compatible = "fsl,imx6sl-iomuxc", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx6sl_pinctrl_of_match);
static int imx6sl_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 09758a56b9df..840344c8580d 100644
--- a/drivers/pinctrl/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -370,7 +370,7 @@ static struct imx_pinctrl_soc_info imx6sx_pinctrl_info = {
.npins = ARRAY_SIZE(imx6sx_pinctrl_pads),
};
-static struct of_device_id imx6sx_pinctrl_of_match[] = {
+static const struct of_device_id imx6sx_pinctrl_of_match[] = {
{ .compatible = "fsl,imx6sx-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 40c76f26998c..f98c6bb0f769 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -21,7 +21,7 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "core.h"
+#include "../core.h"
#include "pinctrl-mxs.h"
#define SUFFIX_LEN 4
@@ -195,8 +195,8 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
struct mxs_group *g = &d->soc->groups[group];
@@ -223,7 +223,7 @@ static const struct pinmux_ops mxs_pinmux_ops = {
.get_functions_count = mxs_pinctrl_get_funcs_count,
.get_function_name = mxs_pinctrl_get_func_name,
.get_function_groups = mxs_pinctrl_get_func_groups,
- .enable = mxs_pinctrl_enable,
+ .set_mux = mxs_pinctrl_set_mux,
};
static int mxs_pinconf_get(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinctrl-mxs.h b/drivers/pinctrl/freescale/pinctrl-mxs.h
index fdd88d0bae22..fdd88d0bae22 100644
--- a/drivers/pinctrl/pinctrl-mxs.h
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.h
diff --git a/drivers/pinctrl/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index bddd913d28ba..b788e1578954 100644
--- a/drivers/pinctrl/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info vf610_pinctrl_info = {
.pins = vf610_pinctrl_pads,
.npins = ARRAY_SIZE(vf610_pinctrl_pads),
- .flags = ZERO_OFFSET_VALID | SHARE_MUX_CONF_REG,
+ .flags = SHARE_MUX_CONF_REG,
};
static struct of_device_id vf610_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 9908374f8f92..f3b426cdaf8f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -259,8 +259,8 @@ static int mvebu_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned fid,
return 0;
}
-static int mvebu_pinmux_enable(struct pinctrl_dev *pctldev, unsigned fid,
- unsigned gid)
+static int mvebu_pinmux_set(struct pinctrl_dev *pctldev, unsigned fid,
+ unsigned gid)
{
struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct mvebu_pinctrl_function *func = &pctl->functions[fid];
@@ -344,7 +344,7 @@ static const struct pinmux_ops mvebu_pinmux_ops = {
.get_function_groups = mvebu_pinmux_get_groups,
.gpio_request_enable = mvebu_pinmux_gpio_request_enable,
.gpio_set_direction = mvebu_pinmux_gpio_set_direction,
- .enable = mvebu_pinmux_enable,
+ .set_mux = mvebu_pinmux_set,
};
static int mvebu_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 8c6fd8d4dd3c..47f493149863 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -34,6 +34,7 @@
#include "pinctrl-abx500.h"
#include "../core.h"
#include "../pinconf.h"
+#include "../pinctrl-utils.h"
/*
* The AB9540 and AB8540 GPIO support are extended versions
@@ -708,8 +709,8 @@ static int abx500_pmx_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int abx500_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
- unsigned group)
+static int abx500_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned group)
{
struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
struct gpio_chip *chip = &pct->chip;
@@ -783,7 +784,7 @@ static const struct pinmux_ops abx500_pinmux_ops = {
.get_functions_count = abx500_pmx_get_funcs_cnt,
.get_function_name = abx500_pmx_get_func_name,
.get_function_groups = abx500_pmx_get_func_groups,
- .enable = abx500_pmx_enable,
+ .set_mux = abx500_pmx_set,
.gpio_request_enable = abx500_gpio_request_enable,
.gpio_disable_free = abx500_gpio_disable_free,
};
@@ -826,41 +827,6 @@ static void abx500_pin_dbg_show(struct pinctrl_dev *pctldev,
chip->base + offset - 1);
}
-static void abx500_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
-{
- int i;
-
- for (i = 0; i < num_maps; i++)
- if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
- kfree(map[i].data.configs.configs);
- kfree(map);
-}
-
-static int abx500_dt_reserve_map(struct pinctrl_map **map,
- unsigned *reserved_maps,
- unsigned *num_maps,
- unsigned reserve)
-{
- unsigned old_num = *reserved_maps;
- unsigned new_num = *num_maps + reserve;
- struct pinctrl_map *new_map;
-
- if (old_num >= new_num)
- return 0;
-
- new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
- if (!new_map)
- return -ENOMEM;
-
- memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
-
- *map = new_map;
- *reserved_maps = new_num;
-
- return 0;
-}
-
static int abx500_dt_add_map_mux(struct pinctrl_map **map,
unsigned *reserved_maps,
unsigned *num_maps, const char *group,
@@ -926,19 +892,32 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
unsigned long *configs;
unsigned int nconfigs = 0;
bool has_config = 0;
- unsigned reserve = 0;
struct property *prop;
const char *group, *gpio_name;
struct device_node *np_config;
ret = of_property_read_string(np, "ste,function", &function);
- if (ret >= 0)
- reserve = 1;
+ if (ret >= 0) {
+ ret = of_property_count_strings(np, "ste,pins");
+ if (ret < 0)
+ goto exit;
+
+ ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
+ num_maps, ret);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "ste,pins", prop, group) {
+ ret = abx500_dt_add_map_mux(map, reserved_maps,
+ num_maps, group, function);
+ if (ret < 0)
+ goto exit;
+ }
+ }
ret = pinconf_generic_parse_dt_config(np, &configs, &nconfigs);
if (nconfigs)
has_config = 1;
-
np_config = of_parse_phandle(np, "ste,config", 0);
if (np_config) {
ret = pinconf_generic_parse_dt_config(np_config, &configs,
@@ -947,28 +926,18 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
goto exit;
has_config |= nconfigs;
}
+ if (has_config) {
+ ret = of_property_count_strings(np, "ste,pins");
+ if (ret < 0)
+ goto exit;
- ret = of_property_count_strings(np, "ste,pins");
- if (ret < 0)
- goto exit;
-
- if (has_config)
- reserve++;
-
- reserve *= ret;
-
- ret = abx500_dt_reserve_map(map, reserved_maps, num_maps, reserve);
- if (ret < 0)
- goto exit;
+ ret = pinctrl_utils_reserve_map(pctldev, map,
+ reserved_maps,
+ num_maps, ret);
+ if (ret < 0)
+ goto exit;
- of_property_for_each_string(np, "ste,pins", prop, group) {
- if (function) {
- ret = abx500_dt_add_map_mux(map, reserved_maps,
- num_maps, group, function);
- if (ret < 0)
- goto exit;
- }
- if (has_config) {
+ of_property_for_each_string(np, "ste,pins", prop, group) {
gpio_name = abx500_find_pin_name(pctldev, group);
ret = abx500_dt_add_map_configs(map, reserved_maps,
@@ -976,8 +945,8 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
}
-
}
+
exit:
return ret;
}
@@ -998,7 +967,7 @@ static int abx500_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = abx500_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- abx500_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
return ret;
}
}
@@ -1012,7 +981,7 @@ static const struct pinctrl_ops abx500_pinctrl_ops = {
.get_group_pins = abx500_get_group_pins,
.pin_dbg_show = abx500_pin_dbg_show,
.dt_node_to_map = abx500_dt_node_to_map,
- .dt_free_map = abx500_dt_free_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
};
static int abx500_pin_config_get(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index e7cab07eef47..3c29d9187146 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -32,6 +32,7 @@
#include <linux/pinctrl/consumer.h>
#include "pinctrl-nomadik.h"
#include "../core.h"
+#include "../pinctrl-utils.h"
/*
* The GPIO module in the Nomadik family of Systems-on-Chip is an
@@ -985,6 +986,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
container_of(chip, struct nmk_gpio_chip, chip);
int mode;
bool is_out;
+ bool data_out;
bool pull;
u32 bit = 1 << offset;
const char *modes[] = {
@@ -997,28 +999,41 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
[NMK_GPIO_ALT_C+3] = "altC3",
[NMK_GPIO_ALT_C+4] = "altC4",
};
+ const char *pulls[] = {
+ "none ",
+ "pull down",
+ "pull up ",
+ };
clk_enable(nmk_chip->clk);
is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & bit);
pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
+ data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & bit);
mode = nmk_gpio_get_mode(gpio);
if ((mode == NMK_GPIO_ALT_C) && pctldev)
mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
- seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s",
- gpio, label ?: "(none)",
- is_out ? "out" : "in ",
- chip->get
- ? (chip->get(chip, offset) ? "hi" : "lo")
- : "? ",
- (mode < 0) ? "unknown" : modes[mode],
- pull ? "pull" : "none");
-
- if (!is_out) {
+ if (is_out) {
+ seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
+ gpio,
+ label ?: "(none)",
+ data_out ? "hi" : "lo",
+ (mode < 0) ? "unknown" : modes[mode]);
+ } else {
int irq = gpio_to_irq(gpio);
struct irq_desc *desc = irq_to_desc(irq);
+ int pullidx = 0;
- /* This races with request_irq(), set_irq_type(),
+ if (pull)
+ pullidx = data_out ? 1 : 2;
+
+ seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
+ gpio,
+ label ?: "(none)",
+ pulls[pullidx],
+ (mode < 0) ? "unknown" : modes[mode]);
+ /*
+ * This races with request_irq(), set_irq_type(),
* and set_irq_wake() ... but those are "rare".
*/
if (irq > 0 && desc && desc->action) {
@@ -1338,39 +1353,6 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
}
-static void nmk_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
-{
- int i;
-
- for (i = 0; i < num_maps; i++)
- if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
- kfree(map[i].data.configs.configs);
- kfree(map);
-}
-
-static int nmk_dt_reserve_map(struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, unsigned reserve)
-{
- unsigned old_num = *reserved_maps;
- unsigned new_num = *num_maps + reserve;
- struct pinctrl_map *new_map;
-
- if (old_num >= new_num)
- return 0;
-
- new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
- if (!new_map)
- return -ENOMEM;
-
- memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
-
- *map = new_map;
- *reserved_maps = new_num;
-
- return 0;
-}
-
static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
unsigned *num_maps, const char *group,
const char *function)
@@ -1537,51 +1519,55 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
const char *function = NULL;
unsigned long configs = 0;
bool has_config = 0;
- unsigned reserve = 0;
struct property *prop;
const char *group, *gpio_name;
struct device_node *np_config;
ret = of_property_read_string(np, "ste,function", &function);
- if (ret >= 0)
- reserve = 1;
-
- has_config = nmk_pinctrl_dt_get_config(np, &configs);
-
- np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config)
- has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
-
- ret = of_property_count_strings(np, "ste,pins");
- if (ret < 0)
- goto exit;
-
- if (has_config)
- reserve++;
-
- reserve *= ret;
-
- ret = nmk_dt_reserve_map(map, reserved_maps, num_maps, reserve);
- if (ret < 0)
- goto exit;
-
- of_property_for_each_string(np, "ste,pins", prop, group) {
- if (function) {
+ if (ret >= 0) {
+ ret = of_property_count_strings(np, "ste,pins");
+ if (ret < 0)
+ goto exit;
+
+ ret = pinctrl_utils_reserve_map(pctldev, map,
+ reserved_maps,
+ num_maps, ret);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "ste,pins", prop, group) {
ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
group, function);
if (ret < 0)
goto exit;
}
- if (has_config) {
+ }
+
+ has_config = nmk_pinctrl_dt_get_config(np, &configs);
+ np_config = of_parse_phandle(np, "ste,config", 0);
+ if (np_config)
+ has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+ if (has_config) {
+ ret = of_property_count_strings(np, "ste,pins");
+ if (ret < 0)
+ goto exit;
+ ret = pinctrl_utils_reserve_map(pctldev, map,
+ reserved_maps,
+ num_maps, ret);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "ste,pins", prop, group) {
gpio_name = nmk_find_pin_name(pctldev, group);
- ret = nmk_dt_add_map_configs(map, reserved_maps, num_maps,
- gpio_name, &configs, 1);
+ ret = nmk_dt_add_map_configs(map, reserved_maps,
+ num_maps,
+ gpio_name, &configs, 1);
if (ret < 0)
goto exit;
}
-
}
+
exit:
return ret;
}
@@ -1602,7 +1588,7 @@ static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- nmk_pinctrl_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
return ret;
}
}
@@ -1616,7 +1602,7 @@ static const struct pinctrl_ops nmk_pinctrl_ops = {
.get_group_pins = nmk_get_group_pins,
.pin_dbg_show = nmk_pin_dbg_show,
.dt_node_to_map = nmk_pinctrl_dt_node_to_map,
- .dt_free_map = nmk_pinctrl_dt_free_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
};
static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -1647,8 +1633,8 @@ static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
- unsigned group)
+static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned group)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
const struct nmk_pingroup *g;
@@ -1810,7 +1796,7 @@ static const struct pinmux_ops nmk_pinmux_ops = {
.get_functions_count = nmk_pmx_get_funcs_cnt,
.get_function_name = nmk_pmx_get_func_name,
.get_function_groups = nmk_pmx_get_func_groups,
- .enable = nmk_pmx_enable,
+ .set_mux = nmk_pmx_set,
.gpio_request_enable = nmk_gpio_request_enable,
.gpio_disable_free = nmk_gpio_disable_free,
};
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index b092b93c67a1..8434439c5017 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -619,8 +619,8 @@ static struct pinctrl_ops adi_pctrl_ops = {
.get_group_pins = adi_get_group_pins,
};
-static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned func_id,
- unsigned group_id)
+static int adi_pinmux_set(struct pinctrl_dev *pctldev, unsigned func_id,
+ unsigned group_id)
{
struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
struct gpio_port *port;
@@ -698,7 +698,7 @@ static int adi_pinmux_request_gpio(struct pinctrl_dev *pctldev,
}
static struct pinmux_ops adi_pinmux_ops = {
- .enable = adi_pinmux_enable,
+ .set_mux = adi_pinmux_set,
.get_functions_count = adi_pinmux_get_funcs_count,
.get_function_name = adi_pinmux_get_func_name,
.get_function_groups = adi_pinmux_get_groups,
@@ -1041,7 +1041,6 @@ static int adi_gpio_remove(struct platform_device *pdev)
u8 offset;
list_del(&port->node);
- gpiochip_remove_pin_ranges(&port->chip);
gpiochip_remove(&port->chip);
if (port->pint) {
for (offset = 0; offset < port->width; offset++)
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 0e4ec91f4d49..1f790a4b83fe 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -230,7 +230,7 @@ static int as3722_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int as3722_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
+static int as3722_pinctrl_set(struct pinctrl_dev *pctldev, unsigned function,
unsigned group)
{
struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev);
@@ -327,7 +327,7 @@ static const struct pinmux_ops as3722_pinmux_ops = {
.get_functions_count = as3722_pinctrl_get_funcs_count,
.get_function_name = as3722_pinctrl_get_func_name,
.get_function_groups = as3722_pinctrl_get_func_groups,
- .enable = as3722_pinctrl_enable,
+ .set_mux = as3722_pinctrl_set,
.gpio_request_enable = as3722_pinctrl_gpio_request_enable,
.gpio_set_direction = as3722_pinctrl_gpio_set_direction,
};
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 60464a2648aa..354a81d40925 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -58,11 +58,28 @@ static int gpio_banks;
#define DEGLITCH (1 << 2)
#define PULL_DOWN (1 << 3)
#define DIS_SCHMIT (1 << 4)
+#define DRIVE_STRENGTH_SHIFT 5
+#define DRIVE_STRENGTH_MASK 0x3
+#define DRIVE_STRENGTH (DRIVE_STRENGTH_MASK << DRIVE_STRENGTH_SHIFT)
#define DEBOUNCE (1 << 16)
#define DEBOUNCE_VAL_SHIFT 17
#define DEBOUNCE_VAL (0x3fff << DEBOUNCE_VAL_SHIFT)
/**
+ * These defines translate the dt binding settings to our internal
+ * settings. They are not necessarily the same value as the register setting.
+ * The actual drive strength current for low, medium and high must be looked
+ * up in the corresponding device datasheet; it can differ even between pins
+ * in the same bank and also depends on VCC.
+ * DRIVE_STRENGTH_DEFAULT is just a placeholder to avoid changing the drive
+ * strength when there is no dt config for it.
+ */
+#define DRIVE_STRENGTH_DEFAULT (0 << DRIVE_STRENGTH_SHIFT)
+#define DRIVE_STRENGTH_LOW (1 << DRIVE_STRENGTH_SHIFT)
+#define DRIVE_STRENGTH_MED (2 << DRIVE_STRENGTH_SHIFT)
+#define DRIVE_STRENGTH_HI (3 << DRIVE_STRENGTH_SHIFT)
+
+/**
* struct at91_pmx_func - describes AT91 pinmux functions
* @name: the name of this specific function
* @groups: corresponding pin groups
@@ -148,6 +165,9 @@ struct at91_pinctrl_mux_ops {
void (*set_pulldown)(void __iomem *pio, unsigned mask, bool is_on);
bool (*get_schmitt_trig)(void __iomem *pio, unsigned pin);
void (*disable_schmitt_trig)(void __iomem *pio, unsigned mask);
+ unsigned (*get_drivestrength)(void __iomem *pio, unsigned pin);
+ void (*set_drivestrength)(void __iomem *pio, unsigned pin,
+ u32 strength);
/* irq */
int (*irq_type)(struct irq_data *d, unsigned type);
};
@@ -315,6 +335,30 @@ static unsigned pin_to_mask(unsigned int pin)
return 1 << pin;
}
+static unsigned two_bit_pin_value_shift_amount(unsigned int pin)
+{
+	/* return the shift amount for a pin in registers that use two bits
+	 * per pin, e.g. drive strength */
+ return 2*((pin >= MAX_NB_GPIO_PER_BANK/2)
+ ? pin - MAX_NB_GPIO_PER_BANK/2 : pin);
+}
+
+static unsigned sama5d3_get_drive_register(unsigned int pin)
+{
+ /* drive strength is split between two registers
+ * with two bits per pin */
+ return (pin >= MAX_NB_GPIO_PER_BANK/2)
+ ? SAMA5D3_PIO_DRIVER2 : SAMA5D3_PIO_DRIVER1;
+}
+
+static unsigned at91sam9x5_get_drive_register(unsigned int pin)
+{
+ /* drive strength is split between two registers
+ * with two bits per pin */
+ return (pin >= MAX_NB_GPIO_PER_BANK/2)
+ ? AT91SAM9X5_PIO_DRIVER2 : AT91SAM9X5_PIO_DRIVER1;
+}
+
static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask)
{
writel_relaxed(mask, pio + PIO_IDR);
@@ -327,6 +371,9 @@ static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin)
static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
{
+ if (on)
+ writel_relaxed(mask, pio + PIO_PPDDR);
+
writel_relaxed(mask, pio + (on ? PIO_PUER : PIO_PUDR));
}
@@ -455,6 +502,9 @@ static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
{
+ if (is_on)
+ __raw_writel(mask, pio + PIO_PUDR);
+
__raw_writel(mask, pio + (is_on ? PIO_PPDER : PIO_PPDDR));
}
@@ -468,6 +518,79 @@ static bool at91_mux_pio3_get_schmitt_trig(void __iomem *pio, unsigned pin)
return (__raw_readl(pio + PIO_SCHMITT) >> pin) & 0x1;
}
+static inline u32 read_drive_strength(void __iomem *reg, unsigned pin)
+{
+ unsigned tmp = __raw_readl(reg);
+
+ tmp = tmp >> two_bit_pin_value_shift_amount(pin);
+
+ return tmp & DRIVE_STRENGTH_MASK;
+}
+
+static unsigned at91_mux_sama5d3_get_drivestrength(void __iomem *pio,
+ unsigned pin)
+{
+ unsigned tmp = read_drive_strength(pio +
+ sama5d3_get_drive_register(pin), pin);
+
+ /* SAMA5 strength is 1:1 with our defines,
+ * except 0 is equivalent to low per datasheet */
+ if (!tmp)
+ tmp = DRIVE_STRENGTH_LOW;
+
+ return tmp;
+}
+
+static unsigned at91_mux_sam9x5_get_drivestrength(void __iomem *pio,
+ unsigned pin)
+{
+ unsigned tmp = read_drive_strength(pio +
+ at91sam9x5_get_drive_register(pin), pin);
+
+	/* the SAM9x5 hardware encoding is the inverse of the pinctrl defines
+	 * hardware: 0 = hi, 1 = med, 2 = low, 3 = rsvd */
+ tmp = DRIVE_STRENGTH_HI - tmp;
+
+ return tmp;
+}
+
+static void set_drive_strength(void __iomem *reg, unsigned pin, u32 strength)
+{
+ unsigned tmp = __raw_readl(reg);
+ unsigned shift = two_bit_pin_value_shift_amount(pin);
+
+ tmp &= ~(DRIVE_STRENGTH_MASK << shift);
+ tmp |= strength << shift;
+
+ __raw_writel(tmp, reg);
+}
+
+static void at91_mux_sama5d3_set_drivestrength(void __iomem *pio, unsigned pin,
+ u32 setting)
+{
+ /* do nothing if setting is zero */
+ if (!setting)
+ return;
+
+ /* strength is 1 to 1 with setting for SAMA5 */
+ set_drive_strength(pio + sama5d3_get_drive_register(pin), pin, setting);
+}
+
+static void at91_mux_sam9x5_set_drivestrength(void __iomem *pio, unsigned pin,
+ u32 setting)
+{
+ /* do nothing if setting is zero */
+ if (!setting)
+ return;
+
+	/* the SAM9x5 hardware value is the inverse of our defines
+	 * 0 = hi, 1 = med, 2 = low, 3 = rsvd */
+ setting = DRIVE_STRENGTH_HI - setting;
+
+ set_drive_strength(pio + at91sam9x5_get_drive_register(pin), pin,
+ setting);
+}
+
static struct at91_pinctrl_mux_ops at91rm9200_ops = {
.get_periph = at91_mux_get_periph,
.mux_A_periph = at91_mux_set_A_periph,
@@ -491,6 +614,27 @@ static struct at91_pinctrl_mux_ops at91sam9x5_ops = {
.set_pulldown = at91_mux_pio3_set_pulldown,
.get_schmitt_trig = at91_mux_pio3_get_schmitt_trig,
.disable_schmitt_trig = at91_mux_pio3_disable_schmitt_trig,
+ .get_drivestrength = at91_mux_sam9x5_get_drivestrength,
+ .set_drivestrength = at91_mux_sam9x5_set_drivestrength,
+ .irq_type = alt_gpio_irq_type,
+};
+
+static struct at91_pinctrl_mux_ops sama5d3_ops = {
+ .get_periph = at91_mux_pio3_get_periph,
+ .mux_A_periph = at91_mux_pio3_set_A_periph,
+ .mux_B_periph = at91_mux_pio3_set_B_periph,
+ .mux_C_periph = at91_mux_pio3_set_C_periph,
+ .mux_D_periph = at91_mux_pio3_set_D_periph,
+ .get_deglitch = at91_mux_pio3_get_deglitch,
+ .set_deglitch = at91_mux_pio3_set_deglitch,
+ .get_debounce = at91_mux_pio3_get_debounce,
+ .set_debounce = at91_mux_pio3_set_debounce,
+ .get_pulldown = at91_mux_pio3_get_pulldown,
+ .set_pulldown = at91_mux_pio3_set_pulldown,
+ .get_schmitt_trig = at91_mux_pio3_get_schmitt_trig,
+ .disable_schmitt_trig = at91_mux_pio3_disable_schmitt_trig,
+ .get_drivestrength = at91_mux_sama5d3_get_drivestrength,
+ .set_drivestrength = at91_mux_sama5d3_set_drivestrength,
.irq_type = alt_gpio_irq_type,
};
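As a worked example of the two-bits-per-pin register layout and of the SAM9x5 inversion described in the comments above (a standalone sketch; the bank size of 32 stands in for MAX_NB_GPIO_PER_BANK and shift_for() mirrors two_bit_pin_value_shift_amount() rather than calling it):

#include <stdio.h>

#define BANK_PINS	32	/* stands in for MAX_NB_GPIO_PER_BANK */
#define STRENGTH_HI	3	/* DRIVE_STRENGTH_HI without the shift */

static unsigned shift_for(unsigned pin)
{
	/* lower half of the bank -> DRIVER1, upper half -> DRIVER2 */
	return 2 * ((pin >= BANK_PINS / 2) ? pin - BANK_PINS / 2 : pin);
}

int main(void)
{
	unsigned pin = 20;

	/* pin 20 of 32: second register, field at bits [9:8] */
	printf("pin %u -> %s, shift %u\n", pin,
	       pin >= BANK_PINS / 2 ? "DRIVER2" : "DRIVER1", shift_for(pin));

	/* SAM9x5 stores the strength inverted: 0 = hi, 1 = med, 2 = low */
	for (unsigned setting = 1; setting <= STRENGTH_HI; setting++)
		printf("pinctrl setting %u -> sam9x5 register value %u\n",
		       setting, STRENGTH_HI - setting);
	return 0;
}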
@@ -554,8 +698,8 @@ static void at91_mux_gpio_enable(void __iomem *pio, unsigned mask, bool input)
writel_relaxed(mask, pio + (input ? PIO_ODR : PIO_OER));
}
-static int at91_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct at91_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const struct at91_pmx_pin *pins_conf = info->groups[group].pins_conf;
@@ -684,7 +828,7 @@ static const struct pinmux_ops at91_pmx_ops = {
.get_functions_count = at91_pmx_get_funcs_count,
.get_function_name = at91_pmx_get_func_name,
.get_function_groups = at91_pmx_get_groups,
- .enable = at91_pmx_enable,
+ .set_mux = at91_pmx_set,
.gpio_request_enable = at91_gpio_request_enable,
.gpio_disable_free = at91_gpio_disable_free,
};
@@ -716,6 +860,9 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
*config |= PULL_DOWN;
if (info->ops->get_schmitt_trig && info->ops->get_schmitt_trig(pio, pin))
*config |= DIS_SCHMIT;
+ if (info->ops->get_drivestrength)
+ *config |= (info->ops->get_drivestrength(pio, pin)
+ << DRIVE_STRENGTH_SHIFT);
return 0;
}
@@ -729,6 +876,7 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
void __iomem *pio;
int i;
unsigned long config;
+ unsigned pin;
for (i = 0; i < num_configs; i++) {
config = configs[i];
@@ -737,7 +885,8 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
"%s:%d, pin_id=%d, config=0x%lx",
__func__, __LINE__, pin_id, config);
pio = pin_to_controller(info, pin_to_bank(pin_id));
- mask = pin_to_mask(pin_id % MAX_NB_GPIO_PER_BANK);
+ pin = pin_id % MAX_NB_GPIO_PER_BANK;
+ mask = pin_to_mask(pin);
if (config & PULL_UP && config & PULL_DOWN)
return -EINVAL;
@@ -753,6 +902,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
info->ops->set_pulldown(pio, mask, config & PULL_DOWN);
if (info->ops->disable_schmitt_trig && config & DIS_SCHMIT)
info->ops->disable_schmitt_trig(pio, mask);
+ if (info->ops->set_drivestrength)
+ info->ops->set_drivestrength(pio, pin,
+ (config & DRIVE_STRENGTH)
+ >> DRIVE_STRENGTH_SHIFT);
} /* for each config */
@@ -768,6 +921,15 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
} \
} while (0)
+#define DBG_SHOW_FLAG_MASKED(mask,flag) do { \
+ if ((config & mask) == flag) { \
+ if (num_conf) \
+ seq_puts(s, "|"); \
+ seq_puts(s, #flag); \
+ num_conf++; \
+ } \
+} while (0)
+
static void at91_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
@@ -781,6 +943,9 @@ static void at91_pinconf_dbg_show(struct pinctrl_dev *pctldev,
DBG_SHOW_FLAG(PULL_DOWN);
DBG_SHOW_FLAG(DIS_SCHMIT);
DBG_SHOW_FLAG(DEGLITCH);
+ DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_LOW);
+ DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_MED);
+ DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_HI);
DBG_SHOW_FLAG(DEBOUNCE);
if (config & DEBOUNCE) {
val = config >> DEBOUNCE_VAL_SHIFT;
@@ -945,6 +1110,7 @@ static int at91_pinctrl_parse_functions(struct device_node *np,
}
static struct of_device_id at91_pinctrl_of_match[] = {
+ { .compatible = "atmel,sama5d3-pinctrl", .data = &sama5d3_ops },
{ .compatible = "atmel,at91sam9x5-pinctrl", .data = &at91sam9x5_ops },
{ .compatible = "atmel,at91rm9200-pinctrl", .data = &at91rm9200_ops },
{ /* sentinel */ }
@@ -1445,7 +1611,7 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
/* now it may re-trigger */
}
-static int at91_gpio_of_irq_setup(struct device_node *node,
+static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *at91_gpio)
{
struct at91_gpio_chip *prev = NULL;
@@ -1470,9 +1636,11 @@ static int at91_gpio_of_irq_setup(struct device_node *node,
0,
handle_edge_irq,
IRQ_TYPE_EDGE_BOTH);
- if (ret)
- panic("at91_gpio.%d: couldn't allocate irq domain (DT).\n",
+ if (ret) {
+ dev_err(&pdev->dev, "at91_gpio.%d: Couldn't add irqchip to gpiochip.\n",
at91_gpio->pioc_idx);
+ return ret;
+ }
/* Setup chained handler */
if (at91_gpio->pioc_idx)
@@ -1575,19 +1743,22 @@ static int at91_gpio_probe(struct platform_device *pdev)
at91_chip->pioc_virq = irq;
at91_chip->pioc_idx = alias_idx;
- at91_chip->clock = clk_get(&pdev->dev, NULL);
+ at91_chip->clock = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(at91_chip->clock)) {
dev_err(&pdev->dev, "failed to get clock, ignoring.\n");
+ ret = PTR_ERR(at91_chip->clock);
goto err;
}
- if (clk_prepare(at91_chip->clock))
- goto clk_prep_err;
+ ret = clk_prepare(at91_chip->clock);
+ if (ret)
+ goto clk_prepare_err;
/* enable PIO controller's clock */
- if (clk_enable(at91_chip->clock)) {
+ ret = clk_enable(at91_chip->clock);
+ if (ret) {
dev_err(&pdev->dev, "failed to enable clock, ignoring.\n");
- goto clk_err;
+ goto clk_enable_err;
}
at91_chip->chip = at91_gpio_template;
@@ -1612,7 +1783,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
if (!names) {
ret = -ENOMEM;
- goto clk_err;
+ goto clk_enable_err;
}
for (i = 0; i < chip->ngpio; i++)
@@ -1630,23 +1801,28 @@ static int at91_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add(chip);
if (ret)
- goto clk_err;
+ goto gpiochip_add_err;
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
at91_gpio_probe_fixup();
- at91_gpio_of_irq_setup(np, at91_chip);
+ ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+ if (ret)
+ goto irq_setup_err;
dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
return 0;
-clk_err:
+irq_setup_err:
+ gpiochip_remove(chip);
+gpiochip_add_err:
+ clk_disable(at91_chip->clock);
+clk_enable_err:
clk_unprepare(at91_chip->clock);
-clk_prep_err:
- clk_put(at91_chip->clock);
+clk_prepare_err:
err:
dev_err(&pdev->dev, "Failure %i for GPIO %i\n", ret, alias_idx);
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c
index c5ca9e633fff..a26e0c2ba33e 100644
--- a/drivers/pinctrl/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/pinctrl-bcm281xx.c
@@ -1055,9 +1055,9 @@ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int bcm281xx_pinmux_enable(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
+static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
const struct bcm281xx_pin_function *f = &pdata->functions[function];
@@ -1084,7 +1084,7 @@ static struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
.get_functions_count = bcm281xx_pinctrl_get_fcns_count,
.get_function_name = bcm281xx_pinctrl_get_fcn_name,
.get_function_groups = bcm281xx_pinctrl_get_fcn_groups,
- .enable = bcm281xx_pinmux_enable,
+ .set_mux = bcm281xx_pinmux_set,
};
static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index 5bcfd7ace0cd..eabba02f71f9 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -830,7 +830,7 @@ static int bcm2835_pmx_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int bcm2835_pmx_enable(struct pinctrl_dev *pctldev,
+static int bcm2835_pmx_set(struct pinctrl_dev *pctldev,
unsigned func_selector,
unsigned group_selector)
{
@@ -869,7 +869,7 @@ static const struct pinmux_ops bcm2835_pmx_ops = {
.get_functions_count = bcm2835_pmx_get_functions_count,
.get_function_name = bcm2835_pmx_get_function_name,
.get_function_groups = bcm2835_pmx_get_function_groups,
- .enable = bcm2835_pmx_enable,
+ .set_mux = bcm2835_pmx_set,
.gpio_disable_free = bcm2835_pmx_gpio_disable_free,
.gpio_set_direction = bcm2835_pmx_gpio_set_direction,
};
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index d22ca252b80d..296e5b37f768 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -257,9 +257,9 @@ static int match_group_mux(const struct ltq_pin_group *grp,
return ret;
}
-static int ltq_pmx_enable(struct pinctrl_dev *pctrldev,
- unsigned func,
- unsigned group)
+static int ltq_pmx_set(struct pinctrl_dev *pctrldev,
+ unsigned func,
+ unsigned group)
{
struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
const struct ltq_pin_group *pin_grp = &info->grps[group];
@@ -316,7 +316,7 @@ static const struct pinmux_ops ltq_pmx_ops = {
.get_functions_count = ltq_pmx_func_count,
.get_function_name = ltq_pmx_func_name,
.get_function_groups = ltq_pmx_get_groups,
- .enable = ltq_pmx_enable,
+ .set_mux = ltq_pmx_set,
.gpio_request_enable = ltq_pmx_gpio_request_enable,
};
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index f13d0e78a41c..e3079d3d19fe 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -685,7 +685,8 @@ static int palmas_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int palmas_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
+static int palmas_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
unsigned group)
{
struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
@@ -742,7 +743,7 @@ static const struct pinmux_ops palmas_pinmux_ops = {
.get_functions_count = palmas_pinctrl_get_funcs_count,
.get_function_name = palmas_pinctrl_get_func_name,
.get_function_groups = palmas_pinctrl_get_func_groups,
- .enable = palmas_pinctrl_enable,
+ .set_mux = palmas_pinctrl_set_mux,
};
static int palmas_pinconf_get(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 0c372a300cb8..016f4578e494 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -816,8 +816,8 @@ static int rockchip_pmx_get_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins = info->groups[group].pins;
@@ -892,7 +892,7 @@ static const struct pinmux_ops rockchip_pmx_ops = {
.get_functions_count = rockchip_pmx_get_funcs_count,
.get_function_name = rockchip_pmx_get_func_name,
.get_function_groups = rockchip_pmx_get_groups,
- .enable = rockchip_pmx_enable,
+ .set_mux = rockchip_pmx_set,
.gpio_set_direction = rockchip_pmx_gpio_set_direction,
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 95dd9cf55cb3..fb94b772ad62 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -447,7 +447,7 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
return 0;
}
-static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
+static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
unsigned group)
{
struct pcs_device *pcs;
@@ -519,7 +519,7 @@ static const struct pinmux_ops pcs_pinmux_ops = {
.get_functions_count = pcs_get_functions_count,
.get_function_name = pcs_get_function_name,
.get_function_groups = pcs_get_function_groups,
- .enable = pcs_enable,
+ .set_mux = pcs_set_mux,
.gpio_request_enable = pcs_request_gpio,
};
@@ -1981,6 +1981,18 @@ static const struct pcs_soc_data pinctrl_single_omap_wkup = {
.irq_status_mask = (1 << 15), /* OMAP_WAKEUP_EVENT */
};
+static const struct pcs_soc_data pinctrl_single_dra7 = {
+ .flags = PCS_QUIRK_SHARED_IRQ,
+ .irq_enable_mask = (1 << 24), /* WAKEUPENABLE */
+ .irq_status_mask = (1 << 25), /* WAKEUPEVENT */
+};
+
+static const struct pcs_soc_data pinctrl_single_am437x = {
+ .flags = PCS_QUIRK_SHARED_IRQ,
+ .irq_enable_mask = (1 << 29), /* OMAP_WAKEUP_EN */
+ .irq_status_mask = (1 << 30), /* OMAP_WAKEUP_EVENT */
+};
+
static const struct pcs_soc_data pinctrl_single = {
};
@@ -1992,6 +2004,8 @@ static struct of_device_id pcs_of_match[] = {
{ .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup },
{ .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup },
{ .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "ti,dra7-padconf", .data = &pinctrl_single_dra7 },
+ { .compatible = "ti,am437-padconf", .data = &pinctrl_single_am437x },
{ .compatible = "pinctrl-single", .data = &pinctrl_single },
{ .compatible = "pinconf-single", .data = &pinconf_single },
{ },
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 5475374d803f..4b1792aad3d8 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -914,8 +914,8 @@ static struct st_pio_control *st_get_pio_control(
return &bank->pc;
}
-static int st_pmx_enable(struct pinctrl_dev *pctldev, unsigned fselector,
- unsigned group)
+static int st_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
+ unsigned group)
{
struct st_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct st_pinconf *conf = info->groups[group].pin_conf;
@@ -951,7 +951,7 @@ static struct pinmux_ops st_pmxops = {
.get_functions_count = st_pmx_get_funcs_count,
.get_function_name = st_pmx_get_fname,
.get_function_groups = st_pmx_get_groups,
- .enable = st_pmx_enable,
+ .set_mux = st_pmx_set_mux,
.gpio_set_direction = st_pmx_set_gpio_direction,
};
@@ -1517,6 +1517,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
0, handle_simple_irq,
IRQ_TYPE_LEVEL_LOW);
if (err) {
+ gpiochip_remove(&bank->gpio_chip);
dev_info(dev, "could not add irqchip\n");
return err;
}
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 71c5d4f0c538..3b9bfcf717ac 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -697,7 +697,7 @@ static void tb10x_gpio_disable_free(struct pinctrl_dev *pctl,
mutex_unlock(&state->mutex);
}
-static int tb10x_pctl_enable(struct pinctrl_dev *pctl,
+static int tb10x_pctl_set_mux(struct pinctrl_dev *pctl,
unsigned func_selector, unsigned group_selector)
{
struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
@@ -744,7 +744,7 @@ static struct pinmux_ops tb10x_pinmux_ops = {
.get_function_groups = tb10x_get_function_groups,
.gpio_request_enable = tb10x_gpio_request_enable,
.gpio_disable_free = tb10x_gpio_disable_free,
- .enable = tb10x_pctl_enable,
+ .set_mux = tb10x_pctl_set_mux,
};
static struct pinctrl_desc tb10x_pindesc = {
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c
index e641b4226c42..1631ec94fb02 100644
--- a/drivers/pinctrl/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/pinctrl-tegra-xusb.c
@@ -281,9 +281,9 @@ static int tegra_xusb_padctl_get_function_groups(struct pinctrl_dev *pinctrl,
return 0;
}
-static int tegra_xusb_padctl_pinmux_enable(struct pinctrl_dev *pinctrl,
- unsigned int function,
- unsigned int group)
+static int tegra_xusb_padctl_pinmux_set(struct pinctrl_dev *pinctrl,
+ unsigned int function,
+ unsigned int group)
{
struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
const struct tegra_xusb_padctl_lane *lane;
@@ -311,7 +311,7 @@ static const struct pinmux_ops tegra_xusb_padctl_pinmux_ops = {
.get_functions_count = tegra_xusb_padctl_get_functions_count,
.get_function_name = tegra_xusb_padctl_get_function_name,
.get_function_groups = tegra_xusb_padctl_get_function_groups,
- .enable = tegra_xusb_padctl_pinmux_enable,
+ .set_mux = tegra_xusb_padctl_pinmux_set,
};
static int tegra_xusb_padctl_pinconf_group_get(struct pinctrl_dev *pinctrl,
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 150af5503c09..e5949d51bc52 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -262,8 +262,9 @@ static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int tegra_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
- unsigned group)
+static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *g;
@@ -294,7 +295,7 @@ static const struct pinmux_ops tegra_pinmux_ops = {
.get_functions_count = tegra_pinctrl_get_funcs_count,
.get_function_name = tegra_pinctrl_get_func_name,
.get_function_groups = tegra_pinctrl_get_func_groups,
- .enable = tegra_pinctrl_enable,
+ .set_mux = tegra_pinctrl_set_mux,
};
static int tegra_pinconf_reg(struct tegra_pmx *pmx,
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 33614baab4c0..a3db85b0b75f 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -1850,7 +1850,7 @@ static int tegra114_pinctrl_probe(struct platform_device *pdev)
return tegra_pinctrl_probe(pdev, &tegra114_pinctrl);
}
-static struct of_device_id tegra114_pinctrl_of_match[] = {
+static const struct of_device_id tegra114_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra114-pinmux", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c
index e80797e20017..2f9b75c14967 100644
--- a/drivers/pinctrl/pinctrl-tegra124.c
+++ b/drivers/pinctrl/pinctrl-tegra124.c
@@ -224,6 +224,16 @@
#define TEGRA_PIN_OWR _PIN(5)
#define TEGRA_PIN_CLK_32K_IN _PIN(6)
#define TEGRA_PIN_JTAG_RTCK _PIN(7)
+#define TEGRA_PIN_DSI_B_CLK_P _PIN(8)
+#define TEGRA_PIN_DSI_B_CLK_N _PIN(9)
+#define TEGRA_PIN_DSI_B_D0_P _PIN(10)
+#define TEGRA_PIN_DSI_B_D0_N _PIN(11)
+#define TEGRA_PIN_DSI_B_D1_P _PIN(12)
+#define TEGRA_PIN_DSI_B_D1_N _PIN(13)
+#define TEGRA_PIN_DSI_B_D2_P _PIN(14)
+#define TEGRA_PIN_DSI_B_D2_N _PIN(15)
+#define TEGRA_PIN_DSI_B_D3_P _PIN(16)
+#define TEGRA_PIN_DSI_B_D3_N _PIN(17)
static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
@@ -417,6 +427,16 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_CLK_P, "DSI_B_CLK_P"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_CLK_N, "DSI_B_CLK_N"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D0_P, "DSI_B_D0_P"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D0_N, "DSI_B_D0_N"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D1_P, "DSI_B_D1_P"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D1_N, "DSI_B_D1_N"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D2_P, "DSI_B_D2_P"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D2_N, "DSI_B_D2_N"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D3_P, "DSI_B_D3_P"),
+ PINCTRL_PIN(TEGRA_PIN_DSI_B_D3_N, "DSI_B_D3_N"),
};
static const unsigned clk_32k_out_pa0_pins[] = {
@@ -1495,6 +1515,19 @@ static const unsigned drive_ao4_pins[] = {
TEGRA_PIN_JTAG_RTCK,
};
+static const unsigned mipi_pad_ctrl_dsi_b_pins[] = {
+ TEGRA_PIN_DSI_B_CLK_P,
+ TEGRA_PIN_DSI_B_CLK_N,
+ TEGRA_PIN_DSI_B_D0_P,
+ TEGRA_PIN_DSI_B_D0_N,
+ TEGRA_PIN_DSI_B_D1_P,
+ TEGRA_PIN_DSI_B_D1_N,
+ TEGRA_PIN_DSI_B_D2_P,
+ TEGRA_PIN_DSI_B_D2_N,
+ TEGRA_PIN_DSI_B_D3_P,
+ TEGRA_PIN_DSI_B_D3_N,
+};
+
enum tegra_mux {
TEGRA_MUX_BLINK,
TEGRA_MUX_CCLA,
@@ -1580,6 +1613,8 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT3,
TEGRA_MUX_VIMCLK2,
TEGRA_MUX_VIMCLK2_ALT,
+ TEGRA_MUX_CSI,
+ TEGRA_MUX_DSI_B,
};
#define FUNCTION(fname) \
@@ -1672,10 +1707,13 @@ static struct tegra_function tegra124_functions[] = {
FUNCTION(vi_alt3),
FUNCTION(vimclk2),
FUNCTION(vimclk2_alt),
+ FUNCTION(csi),
+ FUNCTION(dsi_b),
};
#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define MIPI_PAD_CTRL_PINGROUP_REG_A 0x820 /* bank 2 */
#define PINGROUP_REG(r) ((r) - PINGROUP_REG_A)
@@ -1744,6 +1782,32 @@ static struct tegra_function tegra124_functions[] = {
.drvtype_bit = PINGROUP_BIT_##drvtype(6), \
}
+#define MIPI_PAD_CTRL_PINGROUP_REG_Y(r) ((r) - MIPI_PAD_CTRL_PINGROUP_REG_A)
+
+#define MIPI_PAD_CTRL_PINGROUP(pg_name, r, b, f0, f1) \
+ { \
+ .name = "mipi_pad_ctrl_" #pg_name, \
+ .pins = mipi_pad_ctrl_##pg_name##_pins, \
+ .npins = ARRAY_SIZE(mipi_pad_ctrl_##pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_ ## f0, \
+ TEGRA_MUX_ ## f1, \
+ TEGRA_MUX_RSVD3, \
+ TEGRA_MUX_RSVD4, \
+ }, \
+ .mux_reg = MIPI_PAD_CTRL_PINGROUP_REG_Y(r), \
+ .mux_bank = 2, \
+ .mux_bit = b, \
+ .pupd_reg = -1, \
+ .tri_reg = -1, \
+ .einput_bit = -1, \
+ .odrain_bit = -1, \
+ .lock_bit = -1, \
+ .ioreset_bit = -1, \
+ .rcv_sel_bit = -1, \
+ .drv_reg = -1, \
+ }
+
static const struct tegra_pingroup tegra124_groups[] = {
/* pg_name, f0, f1, f2, f3, r, od, ior, rcv_sel */
PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, 0x3000, N, N, N),
@@ -1979,6 +2043,9 @@ static const struct tegra_pingroup tegra124_groups[] = {
DRV_PINGROUP(hv0, 0x9b4, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
DRV_PINGROUP(sdio4, 0x9c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
DRV_PINGROUP(ao4, 0x9c8, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+
+ /* pg_name, r, b, f0, f1 */
+ MIPI_PAD_CTRL_PINGROUP(dsi_b, 0x820, 1, CSI, DSI_B)
};
static const struct tegra_pinctrl_soc_data tegra124_pinctrl = {
@@ -1996,7 +2063,7 @@ static int tegra124_pinctrl_probe(struct platform_device *pdev)
return tegra_pinctrl_probe(pdev, &tegra124_pinctrl);
}
-static struct of_device_id tegra124_pinctrl_of_match[] = {
+static const struct of_device_id tegra124_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra124-pinmux", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c
index 7563ebc9c791..c9805d2e71b0 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/pinctrl-tegra20.c
@@ -2228,7 +2228,7 @@ static int tegra20_pinctrl_probe(struct platform_device *pdev)
return tegra_pinctrl_probe(pdev, &tegra20_pinctrl);
}
-static struct of_device_id tegra20_pinctrl_of_match[] = {
+static const struct of_device_id tegra20_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra20-pinmux", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index fe2d2cf78ad9..e7b72e916558 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -2484,7 +2484,7 @@ static int tegra30_pinctrl_probe(struct platform_device *pdev)
return tegra_pinctrl_probe(pdev, &tegra30_pinctrl);
}
-static struct of_device_id tegra30_pinctrl_of_match[] = {
+static const struct of_device_id tegra30_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra30-pinmux", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index 41e81a35cabb..3bb6a3b78864 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -547,8 +547,9 @@ static void tz1090_pdc_pinctrl_mux(struct tz1090_pdc_pmx *pmx,
__global_unlock2(flags);
}
-static int tz1090_pdc_pinctrl_enable(struct pinctrl_dev *pctldev,
- unsigned int function, unsigned int group)
+static int tz1090_pdc_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
{
struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[group];
@@ -634,7 +635,7 @@ static struct pinmux_ops tz1090_pdc_pinmux_ops = {
.get_functions_count = tz1090_pdc_pinctrl_get_funcs_count,
.get_function_name = tz1090_pdc_pinctrl_get_func_name,
.get_function_groups = tz1090_pdc_pinctrl_get_func_groups,
- .enable = tz1090_pdc_pinctrl_enable,
+ .set_mux = tz1090_pdc_pinctrl_set_mux,
.gpio_request_enable = tz1090_pdc_pinctrl_gpio_request_enable,
.gpio_disable_free = tz1090_pdc_pinctrl_gpio_disable_free,
};
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index 24082216842e..48d36413b99f 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1415,8 +1415,8 @@ found_mux:
* the effect is the same as enabling the function on each individual pin in the
* group.
*/
-static int tz1090_pinctrl_enable(struct pinctrl_dev *pctldev,
- unsigned int function, unsigned int group)
+static int tz1090_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
{
struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
struct tz1090_pingroup *grp;
@@ -1517,7 +1517,7 @@ static struct pinmux_ops tz1090_pinmux_ops = {
.get_functions_count = tz1090_pinctrl_get_funcs_count,
.get_function_name = tz1090_pinctrl_get_func_name,
.get_function_groups = tz1090_pinctrl_get_func_groups,
- .enable = tz1090_pinctrl_enable,
+ .set_mux = tz1090_pinctrl_set_mux,
.gpio_request_enable = tz1090_pinctrl_gpio_request_enable,
.gpio_disable_free = tz1090_pinctrl_gpio_disable_free,
};
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 0959bb36450f..e9c7113d81f2 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -955,8 +955,8 @@ static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
}
}
-static int u300_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int u300_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct u300_pmx *upmx;
@@ -994,7 +994,7 @@ static const struct pinmux_ops u300_pmx_ops = {
.get_functions_count = u300_pmx_get_funcs_count,
.get_function_name = u300_pmx_get_func_name,
.get_function_groups = u300_pmx_get_groups,
- .enable = u300_pmx_enable,
+ .set_mux = u300_pmx_set_mux,
};
static int u300_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index e66f4cae7633..37040ab42890 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -801,6 +801,7 @@ static int pinmux_xway_probe(struct platform_device *pdev)
of_gpiochip_add(&xway_chip);
ret = gpiochip_add(&xway_chip);
if (ret) {
+ of_gpiochip_remove(&xway_chip);
dev_err(&pdev->dev, "Failed to register gpio chip\n");
return ret;
}
@@ -822,6 +823,7 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* register with the generic lantiq layer */
ret = ltq_pinctrl_register(pdev, &xway_info);
if (ret) {
+ gpiochip_remove(&xway_chip);
dev_err(&pdev->dev, "Failed to register pinctrl driver\n");
return ret;
}
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index c055daf9a80f..b874458dcb88 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -41,7 +41,7 @@ int pinmux_check_ops(struct pinctrl_dev *pctldev)
!ops->get_functions_count ||
!ops->get_function_name ||
!ops->get_function_groups ||
- !ops->enable) {
+ !ops->set_mux) {
dev_err(pctldev->dev, "pinmux ops lacks necessary functions\n");
return -EINVAL;
}
@@ -445,15 +445,15 @@ int pinmux_enable_setting(struct pinctrl_setting const *setting)
desc->mux_setting = &(setting->data.mux);
}
- ret = ops->enable(pctldev, setting->data.mux.func,
- setting->data.mux.group);
+ ret = ops->set_mux(pctldev, setting->data.mux.func,
+ setting->data.mux.group);
if (ret)
- goto err_enable;
+ goto err_set_mux;
return 0;
-err_enable:
+err_set_mux:
for (i = 0; i < num_pins; i++) {
desc = pin_desc_get(pctldev, pins[i]);
if (desc)
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index d160a710d704..81275af9638b 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -15,6 +15,14 @@ config PINCTRL_APQ8064
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm APQ8064 platform.
+config PINCTRL_APQ8084
+ tristate "Qualcomm APQ8084 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm APQ8084 platform.
+
config PINCTRL_IPQ8064
tristate "Qualcomm IPQ8064 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 2a02602d715c..ba8519fcd8d3 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -1,6 +1,7 @@
# Qualcomm pin control drivers
obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
+obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
index feb6f152f9b7..c832d7d6b912 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
@@ -258,6 +258,7 @@ static const unsigned int sdc3_data_pins[] = { 95 };
.intr_status_bit = 0, \
.intr_ack_high = 1, \
.intr_target_bit = 0, \
+ .intr_target_kpss_val = 4, \
.intr_raw_status_bit = 3, \
.intr_polarity_bit = 1, \
.intr_detection_bit = 2, \
@@ -283,6 +284,7 @@ static const unsigned int sdc3_data_pins[] = { 95 };
.intr_enable_bit = -1, \
.intr_status_bit = -1, \
.intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
.intr_raw_status_bit = -1, \
.intr_polarity_bit = -1, \
.intr_detection_bit = -1, \
@@ -324,6 +326,7 @@ enum apq8064_functions {
APQ_MUX_tsif1,
APQ_MUX_tsif2,
APQ_MUX_usb2_hsic,
+ APQ_MUX_ps_hold,
APQ_MUX_NA,
};
@@ -351,6 +354,9 @@ static const char * const gpio_groups[] = {
"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
"gpio85", "gpio86", "gpio87", "gpio88", "gpio89"
};
+static const char * const ps_hold_groups[] = {
+ "gpio78"
+};
static const char * const gsbi1_groups[] = {
"gpio18", "gpio19", "gpio20", "gpio21"
};
@@ -477,6 +483,7 @@ static const struct msm_function apq8064_functions[] = {
FUNCTION(tsif1),
FUNCTION(tsif2),
FUNCTION(usb2_hsic),
+ FUNCTION(ps_hold),
};
static const struct msm_pingroup apq8064_groups[] = {
@@ -558,7 +565,7 @@ static const struct msm_pingroup apq8064_groups[] = {
PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, ps_hold, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c
new file mode 100644
index 000000000000..138cbf6134a5
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c
@@ -0,0 +1,1245 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc apq8084_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+
+ PINCTRL_PIN(147, "SDC1_CLK"),
+ PINCTRL_PIN(148, "SDC1_CMD"),
+ PINCTRL_PIN(149, "SDC1_DATA"),
+ PINCTRL_PIN(150, "SDC2_CLK"),
+ PINCTRL_PIN(151, "SDC2_CMD"),
+ PINCTRL_PIN(152, "SDC2_DATA"),
+};
+
+#define DECLARE_APQ_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin }
+
+DECLARE_APQ_GPIO_PINS(0);
+DECLARE_APQ_GPIO_PINS(1);
+DECLARE_APQ_GPIO_PINS(2);
+DECLARE_APQ_GPIO_PINS(3);
+DECLARE_APQ_GPIO_PINS(4);
+DECLARE_APQ_GPIO_PINS(5);
+DECLARE_APQ_GPIO_PINS(6);
+DECLARE_APQ_GPIO_PINS(7);
+DECLARE_APQ_GPIO_PINS(8);
+DECLARE_APQ_GPIO_PINS(9);
+DECLARE_APQ_GPIO_PINS(10);
+DECLARE_APQ_GPIO_PINS(11);
+DECLARE_APQ_GPIO_PINS(12);
+DECLARE_APQ_GPIO_PINS(13);
+DECLARE_APQ_GPIO_PINS(14);
+DECLARE_APQ_GPIO_PINS(15);
+DECLARE_APQ_GPIO_PINS(16);
+DECLARE_APQ_GPIO_PINS(17);
+DECLARE_APQ_GPIO_PINS(18);
+DECLARE_APQ_GPIO_PINS(19);
+DECLARE_APQ_GPIO_PINS(20);
+DECLARE_APQ_GPIO_PINS(21);
+DECLARE_APQ_GPIO_PINS(22);
+DECLARE_APQ_GPIO_PINS(23);
+DECLARE_APQ_GPIO_PINS(24);
+DECLARE_APQ_GPIO_PINS(25);
+DECLARE_APQ_GPIO_PINS(26);
+DECLARE_APQ_GPIO_PINS(27);
+DECLARE_APQ_GPIO_PINS(28);
+DECLARE_APQ_GPIO_PINS(29);
+DECLARE_APQ_GPIO_PINS(30);
+DECLARE_APQ_GPIO_PINS(31);
+DECLARE_APQ_GPIO_PINS(32);
+DECLARE_APQ_GPIO_PINS(33);
+DECLARE_APQ_GPIO_PINS(34);
+DECLARE_APQ_GPIO_PINS(35);
+DECLARE_APQ_GPIO_PINS(36);
+DECLARE_APQ_GPIO_PINS(37);
+DECLARE_APQ_GPIO_PINS(38);
+DECLARE_APQ_GPIO_PINS(39);
+DECLARE_APQ_GPIO_PINS(40);
+DECLARE_APQ_GPIO_PINS(41);
+DECLARE_APQ_GPIO_PINS(42);
+DECLARE_APQ_GPIO_PINS(43);
+DECLARE_APQ_GPIO_PINS(44);
+DECLARE_APQ_GPIO_PINS(45);
+DECLARE_APQ_GPIO_PINS(46);
+DECLARE_APQ_GPIO_PINS(47);
+DECLARE_APQ_GPIO_PINS(48);
+DECLARE_APQ_GPIO_PINS(49);
+DECLARE_APQ_GPIO_PINS(50);
+DECLARE_APQ_GPIO_PINS(51);
+DECLARE_APQ_GPIO_PINS(52);
+DECLARE_APQ_GPIO_PINS(53);
+DECLARE_APQ_GPIO_PINS(54);
+DECLARE_APQ_GPIO_PINS(55);
+DECLARE_APQ_GPIO_PINS(56);
+DECLARE_APQ_GPIO_PINS(57);
+DECLARE_APQ_GPIO_PINS(58);
+DECLARE_APQ_GPIO_PINS(59);
+DECLARE_APQ_GPIO_PINS(60);
+DECLARE_APQ_GPIO_PINS(61);
+DECLARE_APQ_GPIO_PINS(62);
+DECLARE_APQ_GPIO_PINS(63);
+DECLARE_APQ_GPIO_PINS(64);
+DECLARE_APQ_GPIO_PINS(65);
+DECLARE_APQ_GPIO_PINS(66);
+DECLARE_APQ_GPIO_PINS(67);
+DECLARE_APQ_GPIO_PINS(68);
+DECLARE_APQ_GPIO_PINS(69);
+DECLARE_APQ_GPIO_PINS(70);
+DECLARE_APQ_GPIO_PINS(71);
+DECLARE_APQ_GPIO_PINS(72);
+DECLARE_APQ_GPIO_PINS(73);
+DECLARE_APQ_GPIO_PINS(74);
+DECLARE_APQ_GPIO_PINS(75);
+DECLARE_APQ_GPIO_PINS(76);
+DECLARE_APQ_GPIO_PINS(77);
+DECLARE_APQ_GPIO_PINS(78);
+DECLARE_APQ_GPIO_PINS(79);
+DECLARE_APQ_GPIO_PINS(80);
+DECLARE_APQ_GPIO_PINS(81);
+DECLARE_APQ_GPIO_PINS(82);
+DECLARE_APQ_GPIO_PINS(83);
+DECLARE_APQ_GPIO_PINS(84);
+DECLARE_APQ_GPIO_PINS(85);
+DECLARE_APQ_GPIO_PINS(86);
+DECLARE_APQ_GPIO_PINS(87);
+DECLARE_APQ_GPIO_PINS(88);
+DECLARE_APQ_GPIO_PINS(89);
+DECLARE_APQ_GPIO_PINS(90);
+DECLARE_APQ_GPIO_PINS(91);
+DECLARE_APQ_GPIO_PINS(92);
+DECLARE_APQ_GPIO_PINS(93);
+DECLARE_APQ_GPIO_PINS(94);
+DECLARE_APQ_GPIO_PINS(95);
+DECLARE_APQ_GPIO_PINS(96);
+DECLARE_APQ_GPIO_PINS(97);
+DECLARE_APQ_GPIO_PINS(98);
+DECLARE_APQ_GPIO_PINS(99);
+DECLARE_APQ_GPIO_PINS(100);
+DECLARE_APQ_GPIO_PINS(101);
+DECLARE_APQ_GPIO_PINS(102);
+DECLARE_APQ_GPIO_PINS(103);
+DECLARE_APQ_GPIO_PINS(104);
+DECLARE_APQ_GPIO_PINS(105);
+DECLARE_APQ_GPIO_PINS(106);
+DECLARE_APQ_GPIO_PINS(107);
+DECLARE_APQ_GPIO_PINS(108);
+DECLARE_APQ_GPIO_PINS(109);
+DECLARE_APQ_GPIO_PINS(110);
+DECLARE_APQ_GPIO_PINS(111);
+DECLARE_APQ_GPIO_PINS(112);
+DECLARE_APQ_GPIO_PINS(113);
+DECLARE_APQ_GPIO_PINS(114);
+DECLARE_APQ_GPIO_PINS(115);
+DECLARE_APQ_GPIO_PINS(116);
+DECLARE_APQ_GPIO_PINS(117);
+DECLARE_APQ_GPIO_PINS(118);
+DECLARE_APQ_GPIO_PINS(119);
+DECLARE_APQ_GPIO_PINS(120);
+DECLARE_APQ_GPIO_PINS(121);
+DECLARE_APQ_GPIO_PINS(122);
+DECLARE_APQ_GPIO_PINS(123);
+DECLARE_APQ_GPIO_PINS(124);
+DECLARE_APQ_GPIO_PINS(125);
+DECLARE_APQ_GPIO_PINS(126);
+DECLARE_APQ_GPIO_PINS(127);
+DECLARE_APQ_GPIO_PINS(128);
+DECLARE_APQ_GPIO_PINS(129);
+DECLARE_APQ_GPIO_PINS(130);
+DECLARE_APQ_GPIO_PINS(131);
+DECLARE_APQ_GPIO_PINS(132);
+DECLARE_APQ_GPIO_PINS(133);
+DECLARE_APQ_GPIO_PINS(134);
+DECLARE_APQ_GPIO_PINS(135);
+DECLARE_APQ_GPIO_PINS(136);
+DECLARE_APQ_GPIO_PINS(137);
+DECLARE_APQ_GPIO_PINS(138);
+DECLARE_APQ_GPIO_PINS(139);
+DECLARE_APQ_GPIO_PINS(140);
+DECLARE_APQ_GPIO_PINS(141);
+DECLARE_APQ_GPIO_PINS(142);
+DECLARE_APQ_GPIO_PINS(143);
+DECLARE_APQ_GPIO_PINS(144);
+DECLARE_APQ_GPIO_PINS(145);
+DECLARE_APQ_GPIO_PINS(146);
+
+static const unsigned int sdc1_clk_pins[] = { 147 };
+static const unsigned int sdc1_cmd_pins[] = { 148 };
+static const unsigned int sdc1_data_pins[] = { 149 };
+static const unsigned int sdc2_clk_pins[] = { 150 };
+static const unsigned int sdc2_cmd_pins[] = { 151 };
+static const unsigned int sdc2_data_pins[] = { 152 };
+
+#define FUNCTION(fname) \
+ [APQ_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ APQ_MUX_gpio, \
+ APQ_MUX_##f1, \
+ APQ_MUX_##f2, \
+ APQ_MUX_##f3, \
+ APQ_MUX_##f4, \
+ APQ_MUX_##f5, \
+ APQ_MUX_##f6, \
+ APQ_MUX_##f7 \
+ }, \
+ .nfuncs = 8, \
+ .ctl_reg = 0x1000 + 0x10 * id, \
+ .io_reg = 0x1004 + 0x10 * id, \
+ .intr_cfg_reg = 0x1008 + 0x10 * id, \
+ .intr_status_reg = 0x100c + 0x10 * id, \
+ .intr_target_reg = 0x1008 + 0x10 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_ack_high = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+enum apq8084_functions {
+ APQ_MUX_adsp_ext,
+ APQ_MUX_audio_ref,
+ APQ_MUX_blsp_i2c1,
+ APQ_MUX_blsp_i2c2,
+ APQ_MUX_blsp_i2c3,
+ APQ_MUX_blsp_i2c4,
+ APQ_MUX_blsp_i2c5,
+ APQ_MUX_blsp_i2c6,
+ APQ_MUX_blsp_i2c7,
+ APQ_MUX_blsp_i2c8,
+ APQ_MUX_blsp_i2c9,
+ APQ_MUX_blsp_i2c10,
+ APQ_MUX_blsp_i2c11,
+ APQ_MUX_blsp_i2c12,
+ APQ_MUX_blsp_spi1,
+ APQ_MUX_blsp_spi1_cs1,
+ APQ_MUX_blsp_spi1_cs2,
+ APQ_MUX_blsp_spi1_cs3,
+ APQ_MUX_blsp_spi2,
+ APQ_MUX_blsp_spi3,
+ APQ_MUX_blsp_spi3_cs1,
+ APQ_MUX_blsp_spi3_cs2,
+ APQ_MUX_blsp_spi3_cs3,
+ APQ_MUX_blsp_spi4,
+ APQ_MUX_blsp_spi5,
+ APQ_MUX_blsp_spi6,
+ APQ_MUX_blsp_spi7,
+ APQ_MUX_blsp_spi8,
+ APQ_MUX_blsp_spi9,
+ APQ_MUX_blsp_spi10,
+ APQ_MUX_blsp_spi10_cs1,
+ APQ_MUX_blsp_spi10_cs2,
+ APQ_MUX_blsp_spi10_cs3,
+ APQ_MUX_blsp_spi11,
+ APQ_MUX_blsp_spi12,
+ APQ_MUX_blsp_uart1,
+ APQ_MUX_blsp_uart2,
+ APQ_MUX_blsp_uart3,
+ APQ_MUX_blsp_uart4,
+ APQ_MUX_blsp_uart5,
+ APQ_MUX_blsp_uart6,
+ APQ_MUX_blsp_uart7,
+ APQ_MUX_blsp_uart8,
+ APQ_MUX_blsp_uart9,
+ APQ_MUX_blsp_uart10,
+ APQ_MUX_blsp_uart11,
+ APQ_MUX_blsp_uart12,
+ APQ_MUX_blsp_uim1,
+ APQ_MUX_blsp_uim2,
+ APQ_MUX_blsp_uim3,
+ APQ_MUX_blsp_uim4,
+ APQ_MUX_blsp_uim5,
+ APQ_MUX_blsp_uim6,
+ APQ_MUX_blsp_uim7,
+ APQ_MUX_blsp_uim8,
+ APQ_MUX_blsp_uim9,
+ APQ_MUX_blsp_uim10,
+ APQ_MUX_blsp_uim11,
+ APQ_MUX_blsp_uim12,
+ APQ_MUX_cam_mclk0,
+ APQ_MUX_cam_mclk1,
+ APQ_MUX_cam_mclk2,
+ APQ_MUX_cam_mclk3,
+ APQ_MUX_cci_async,
+ APQ_MUX_cci_async_in0,
+ APQ_MUX_cci_i2c0,
+ APQ_MUX_cci_i2c1,
+ APQ_MUX_cci_timer0,
+ APQ_MUX_cci_timer1,
+ APQ_MUX_cci_timer2,
+ APQ_MUX_cci_timer3,
+ APQ_MUX_cci_timer4,
+ APQ_MUX_edp_hpd,
+ APQ_MUX_gcc_gp1,
+ APQ_MUX_gcc_gp2,
+ APQ_MUX_gcc_gp3,
+ APQ_MUX_gcc_obt,
+ APQ_MUX_gcc_vtt,
+ APQ_MUX_gp_mn,
+ APQ_MUX_gp_pdm0,
+ APQ_MUX_gp_pdm1,
+ APQ_MUX_gp_pdm2,
+ APQ_MUX_gp0_clk,
+ APQ_MUX_gp1_clk,
+ APQ_MUX_gpio,
+ APQ_MUX_hdmi_cec,
+ APQ_MUX_hdmi_ddc,
+ APQ_MUX_hdmi_dtest,
+ APQ_MUX_hdmi_hpd,
+ APQ_MUX_hdmi_rcv,
+ APQ_MUX_hsic,
+ APQ_MUX_ldo_en,
+ APQ_MUX_ldo_update,
+ APQ_MUX_mdp_vsync,
+ APQ_MUX_pci_e0,
+ APQ_MUX_pci_e0_n,
+ APQ_MUX_pci_e0_rst,
+ APQ_MUX_pci_e1,
+ APQ_MUX_pci_e1_rst,
+ APQ_MUX_pci_e1_rst_n,
+ APQ_MUX_pci_e1_clkreq_n,
+ APQ_MUX_pri_mi2s,
+ APQ_MUX_qua_mi2s,
+ APQ_MUX_sata_act,
+ APQ_MUX_sata_devsleep,
+ APQ_MUX_sata_devsleep_n,
+ APQ_MUX_sd_write,
+ APQ_MUX_sdc_emmc_mode,
+ APQ_MUX_sdc3,
+ APQ_MUX_sdc4,
+ APQ_MUX_sec_mi2s,
+ APQ_MUX_slimbus,
+ APQ_MUX_spdif_tx,
+ APQ_MUX_spkr_i2s,
+ APQ_MUX_spkr_i2s_ws,
+ APQ_MUX_spss_geni,
+ APQ_MUX_ter_mi2s,
+ APQ_MUX_tsif1,
+ APQ_MUX_tsif2,
+ APQ_MUX_uim,
+ APQ_MUX_uim_batt_alarm,
+ APQ_MUX_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146"
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio34"
+};
+static const char * const audio_ref_groups[] = {
+ "gpio100"
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3"
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7"
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11"
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio29", "gpio30"
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio41", "gpio42"
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio45", "gpio46"
+};
+static const char * const blsp_i2c7_groups[] = {
+ "gpio132", "gpio133"
+};
+static const char * const blsp_i2c8_groups[] = {
+ "gpio53", "gpio54"
+};
+static const char * const blsp_i2c9_groups[] = {
+ "gpio57", "gpio58"
+};
+static const char * const blsp_i2c10_groups[] = {
+ "gpio61", "gpio62"
+};
+static const char * const blsp_i2c11_groups[] = {
+ "gpio65", "gpio66"
+};
+static const char * const blsp_i2c12_groups[] = {
+ "gpio49", "gpio50"
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio27", "gpio28", "gpio29", "gpio30"
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio39", "gpio40", "gpio41", "gpio42"
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio43", "gpio44", "gpio45", "gpio46"
+};
+static const char * const blsp_spi7_groups[] = {
+ "gpio130", "gpio131", "gpio132", "gpio133"
+};
+static const char * const blsp_spi8_groups[] = {
+ "gpio51", "gpio52", "gpio53", "gpio54"
+};
+static const char * const blsp_spi9_groups[] = {
+ "gpio55", "gpio56", "gpio57", "gpio58"
+};
+static const char * const blsp_spi10_groups[] = {
+ "gpio59", "gpio60", "gpio61", "gpio62"
+};
+static const char * const blsp_spi11_groups[] = {
+ "gpio63", "gpio64", "gpio65", "gpio66"
+};
+static const char * const blsp_spi12_groups[] = {
+ "gpio47", "gpio48", "gpio49", "gpio50"
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uart3_groups[] = {
+ "gpio8"
+};
+static const char * const blsp_uart4_groups[] = {
+ "gpio27", "gpio28", "gpio29", "gpio30"
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio39", "gpio40", "gpio41", "gpio42"
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio43", "gpio44", "gpio45", "gpio46"
+};
+static const char * const blsp_uart7_groups[] = {
+ "gpio130", "gpio131", "gpio132", "gpio133"
+};
+static const char * const blsp_uart8_groups[] = {
+ "gpio51", "gpio52", "gpio53", "gpio54"
+};
+static const char * const blsp_uart9_groups[] = {
+ "gpio55", "gpio56", "gpio57", "gpio58"
+};
+static const char * const blsp_uart10_groups[] = {
+ "gpio59", "gpio60", "gpio61", "gpio62"
+};
+static const char * const blsp_uart11_groups[] = {
+ "gpio63", "gpio64", "gpio65", "gpio66"
+};
+static const char * const blsp_uart12_groups[] = {
+ "gpio47", "gpio48", "gpio49", "gpio50"
+};
+static const char * const blsp_uim1_groups[] = {
+ "gpio0", "gpio1"
+};
+static const char * const blsp_uim2_groups[] = {
+ "gpio4", "gpio5"
+};
+static const char * const blsp_uim3_groups[] = {
+ "gpio8", "gpio9"
+};
+static const char * const blsp_uim4_groups[] = {
+ "gpio27", "gpio28"
+};
+static const char * const blsp_uim5_groups[] = {
+ "gpio39", "gpio40"
+};
+static const char * const blsp_uim6_groups[] = {
+ "gpio43", "gpio44"
+};
+static const char * const blsp_uim7_groups[] = {
+ "gpio130", "gpio131"
+};
+static const char * const blsp_uim8_groups[] = {
+ "gpio51", "gpio52"
+};
+static const char * const blsp_uim9_groups[] = {
+ "gpio55", "gpio56"
+};
+static const char * const blsp_uim10_groups[] = {
+ "gpio59", "gpio60"
+};
+static const char * const blsp_uim11_groups[] = {
+ "gpio63", "gpio64"
+};
+static const char * const blsp_uim12_groups[] = {
+ "gpio47", "gpio48"
+};
+static const char * const blsp_spi1_cs1_groups[] = {
+ "gpio116"
+};
+static const char * const blsp_spi1_cs2_groups[] = {
+ "gpio117"
+};
+static const char * const blsp_spi1_cs3_groups[] = {
+ "gpio118"
+};
+static const char * const blsp_spi3_cs1_groups[] = {
+ "gpio67"
+};
+static const char * const blsp_spi3_cs2_groups[] = {
+ "gpio71"
+};
+static const char * const blsp_spi3_cs3_groups[] = {
+ "gpio72"
+};
+static const char * const blsp_spi10_cs1_groups[] = {
+ "gpio106"
+};
+static const char * const blsp_spi10_cs2_groups[] = {
+ "gpio111"
+};
+static const char * const blsp_spi10_cs3_groups[] = {
+ "gpio128"
+};
+static const char * const cam_mclk0_groups[] = {
+ "gpio15"
+};
+static const char * const cam_mclk1_groups[] = {
+ "gpio16"
+};
+static const char * const cam_mclk2_groups[] = {
+ "gpio17"
+};
+static const char * const cam_mclk3_groups[] = {
+ "gpio18"
+};
+static const char * const cci_async_groups[] = {
+ "gpio26", "gpio119"
+};
+static const char * const cci_async_in0_groups[] = {
+ "gpio120"
+};
+static const char * const cci_i2c0_groups[] = {
+ "gpio19", "gpio20"
+};
+static const char * const cci_i2c1_groups[] = {
+ "gpio21", "gpio22"
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio23"
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio24"
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio25"
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio26"
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio119"
+};
+static const char * const edp_hpd_groups[] = {
+ "gpio103"
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio37"
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio38"
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio86"
+};
+static const char * const gcc_obt_groups[] = {
+ "gpio127"
+};
+static const char * const gcc_vtt_groups[] = {
+ "gpio126"
+};
+static const char * const gp_mn_groups[] = {
+ "gpio29"
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio48", "gpio83"
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio84", "gpio101"
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio85", "gpio110"
+};
+static const char * const gp0_clk_groups[] = {
+ "gpio25"
+};
+static const char * const gp1_clk_groups[] = {
+ "gpio26"
+};
+static const char * const hdmi_cec_groups[] = {
+ "gpio31"
+};
+static const char * const hdmi_ddc_groups[] = {
+ "gpio32", "gpio33"
+};
+static const char * const hdmi_dtest_groups[] = {
+ "gpio123"
+};
+static const char * const hdmi_hpd_groups[] = {
+ "gpio34"
+};
+static const char * const hdmi_rcv_groups[] = {
+ "gpio125"
+};
+static const char * const hsic_groups[] = {
+ "gpio134", "gpio135"
+};
+static const char * const ldo_en_groups[] = {
+ "gpio124"
+};
+static const char * const ldo_update_groups[] = {
+ "gpio125"
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio12", "gpio13", "gpio14"
+};
+static const char * const pci_e0_groups[] = {
+ "gpio68", "gpio70"
+};
+static const char * const pci_e0_n_groups[] = {
+ "gpio68", "gpio70"
+};
+static const char * const pci_e0_rst_groups[] = {
+ "gpio70"
+};
+static const char * const pci_e1_groups[] = {
+ "gpio140"
+};
+static const char * const pci_e1_rst_groups[] = {
+ "gpio140"
+};
+static const char * const pci_e1_rst_n_groups[] = {
+ "gpio140"
+};
+static const char * const pci_e1_clkreq_n_groups[] = {
+ "gpio141"
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79", "gpio80"
+};
+static const char * const qua_mi2s_groups[] = {
+ "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97"
+};
+static const char * const sata_act_groups[] = {
+ "gpio129"
+};
+static const char * const sata_devsleep_groups[] = {
+ "gpio119"
+};
+static const char * const sata_devsleep_n_groups[] = {
+ "gpio119"
+};
+static const char * const sd_write_groups[] = {
+ "gpio75"
+};
+static const char * const sdc_emmc_mode_groups[] = {
+ "gpio146"
+};
+static const char * const sdc3_groups[] = {
+ "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72"
+};
+static const char * const sdc4_groups[] = {
+ "gpio82", "gpio83", "gpio84", "gpio85", "gpio86",
+ "gpio91", "gpio95", "gpio96", "gpio97", "gpio101"
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio81", "gpio82", "gpio83", "gpio84", "gpio85"
+};
+static const char * const slimbus_groups[] = {
+ "gpio98", "gpio99"
+};
+static const char * const spdif_tx_groups[] = {
+ "gpio124", "gpio136", "gpio142"
+};
+static const char * const spkr_i2s_groups[] = {
+ "gpio98", "gpio99", "gpio100"
+};
+static const char * const spkr_i2s_ws_groups[] = {
+ "gpio104"
+};
+static const char * const spss_geni_groups[] = {
+ "gpio8", "gpio9"
+};
+static const char * const ter_mi2s_groups[] = {
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90"
+};
+static const char * const tsif1_groups[] = {
+ "gpio82", "gpio83", "gpio84", "gpio85", "gpio86"
+};
+static const char * const tsif2_groups[] = {
+ "gpio91", "gpio95", "gpio96", "gpio97", "gpio101"
+};
+static const char * const uim_groups[] = {
+ "gpio130", "gpio131", "gpio132", "gpio133"
+};
+static const char * const uim_batt_alarm_groups[] = {
+ "gpio102"
+};
+static const struct msm_function apq8084_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(audio_ref),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_i2c7),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(blsp_i2c9),
+ FUNCTION(blsp_i2c10),
+ FUNCTION(blsp_i2c11),
+ FUNCTION(blsp_i2c12),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi1_cs1),
+ FUNCTION(blsp_spi1_cs2),
+ FUNCTION(blsp_spi1_cs3),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi3_cs1),
+ FUNCTION(blsp_spi3_cs2),
+ FUNCTION(blsp_spi3_cs3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_spi7),
+ FUNCTION(blsp_spi8),
+ FUNCTION(blsp_spi9),
+ FUNCTION(blsp_spi10),
+ FUNCTION(blsp_spi10_cs1),
+ FUNCTION(blsp_spi10_cs2),
+ FUNCTION(blsp_spi10_cs3),
+ FUNCTION(blsp_spi11),
+ FUNCTION(blsp_spi12),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_uart4),
+ FUNCTION(blsp_uart5),
+ FUNCTION(blsp_uart6),
+ FUNCTION(blsp_uart7),
+ FUNCTION(blsp_uart8),
+ FUNCTION(blsp_uart9),
+ FUNCTION(blsp_uart10),
+ FUNCTION(blsp_uart11),
+ FUNCTION(blsp_uart12),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(blsp_uim3),
+ FUNCTION(blsp_uim4),
+ FUNCTION(blsp_uim5),
+ FUNCTION(blsp_uim6),
+ FUNCTION(blsp_uim7),
+ FUNCTION(blsp_uim8),
+ FUNCTION(blsp_uim9),
+ FUNCTION(blsp_uim10),
+ FUNCTION(blsp_uim11),
+ FUNCTION(blsp_uim12),
+ FUNCTION(cam_mclk0),
+ FUNCTION(cam_mclk1),
+ FUNCTION(cam_mclk2),
+ FUNCTION(cam_mclk3),
+ FUNCTION(cci_async),
+ FUNCTION(cci_async_in0),
+ FUNCTION(cci_i2c0),
+ FUNCTION(cci_i2c1),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(edp_hpd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gcc_obt),
+ FUNCTION(gcc_vtt),
+ FUNCTION(gp_mn),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gp0_clk),
+ FUNCTION(gp1_clk),
+ FUNCTION(gpio),
+ FUNCTION(hdmi_cec),
+ FUNCTION(hdmi_ddc),
+ FUNCTION(hdmi_dtest),
+ FUNCTION(hdmi_hpd),
+ FUNCTION(hdmi_rcv),
+ FUNCTION(hsic),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(mdp_vsync),
+ FUNCTION(pci_e0),
+ FUNCTION(pci_e0_n),
+ FUNCTION(pci_e0_rst),
+ FUNCTION(pci_e1),
+ FUNCTION(pci_e1_rst),
+ FUNCTION(pci_e1_rst_n),
+ FUNCTION(pci_e1_clkreq_n),
+ FUNCTION(pri_mi2s),
+ FUNCTION(qua_mi2s),
+ FUNCTION(sata_act),
+ FUNCTION(sata_devsleep),
+ FUNCTION(sata_devsleep_n),
+ FUNCTION(sd_write),
+ FUNCTION(sdc_emmc_mode),
+ FUNCTION(sdc3),
+ FUNCTION(sdc4),
+ FUNCTION(sec_mi2s),
+ FUNCTION(slimbus),
+ FUNCTION(spdif_tx),
+ FUNCTION(spkr_i2s),
+ FUNCTION(spkr_i2s_ws),
+ FUNCTION(spss_geni),
+ FUNCTION(ter_mi2s),
+ FUNCTION(tsif1),
+ FUNCTION(tsif2),
+ FUNCTION(uim),
+ FUNCTION(uim_batt_alarm),
+};
+
+static const struct msm_pingroup apq8084_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, blsp_uim2, NA, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, blsp_uim2, NA, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA),
+ PINGROUP(8, blsp_spi3, blsp_uart3, blsp_uim3, spss_geni, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, blsp_uim3, blsp_uart3, spss_geni, NA, NA, NA),
+ PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, NA, NA, NA, NA),
+ PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, NA, NA, NA, NA),
+ PINGROUP(12, mdp_vsync, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, mdp_vsync, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, mdp_vsync, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, cam_mclk0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, cam_mclk1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, cam_mclk2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, cam_mclk3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, cci_i2c0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, cci_i2c0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, cci_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, cci_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, cci_timer0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, cci_timer1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, cci_timer2, gp0_clk, NA, NA, NA, NA, NA),
+ PINGROUP(26, cci_timer3, cci_async, gp1_clk, NA, NA, NA, NA),
+ PINGROUP(27, blsp_spi4, blsp_uart4, blsp_uim4, NA, NA, NA, NA),
+ PINGROUP(28, blsp_spi4, blsp_uart4, blsp_uim4, NA, NA, NA, NA),
+ PINGROUP(29, blsp_spi4, blsp_uart4, blsp_i2c4, gp_mn, NA, NA, NA),
+ PINGROUP(30, blsp_spi4, blsp_uart4, blsp_i2c4, NA, NA, NA, NA),
+ PINGROUP(31, hdmi_cec, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, hdmi_ddc, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, hdmi_ddc, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, hdmi_hpd, NA, adsp_ext, NA, NA, NA, NA),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, gcc_gp1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, gcc_gp2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, blsp_spi5, blsp_uart5, blsp_uim5, NA, NA, NA, NA),
+ PINGROUP(40, blsp_spi5, blsp_uart5, blsp_uim5, NA, NA, NA, NA),
+ PINGROUP(41, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA),
+ PINGROUP(42, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA),
+ PINGROUP(43, blsp_spi6, blsp_uart6, blsp_uim6, NA, NA, NA, NA),
+ PINGROUP(44, blsp_spi6, blsp_uart6, blsp_uim6, NA, NA, NA, NA),
+ PINGROUP(45, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA),
+ PINGROUP(46, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA),
+ PINGROUP(47, blsp_spi12, blsp_uart12, blsp_uim12, NA, NA, NA, NA),
+ PINGROUP(48, blsp_spi12, blsp_uart12, blsp_uim12, gp_pdm0, NA, NA, NA),
+ PINGROUP(49, blsp_spi12, blsp_uart12, blsp_i2c12, NA, NA, NA, NA),
+ PINGROUP(50, blsp_spi12, blsp_uart12, blsp_i2c12, NA, NA, NA, NA),
+ PINGROUP(51, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA),
+ PINGROUP(52, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA),
+ PINGROUP(53, blsp_spi8, blsp_uart8, blsp_i2c8, NA, NA, NA, NA),
+ PINGROUP(54, blsp_spi8, blsp_uart8, blsp_i2c8, NA, NA, NA, NA),
+ PINGROUP(55, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA),
+ PINGROUP(56, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA),
+ PINGROUP(57, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA),
+ PINGROUP(58, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA),
+ PINGROUP(59, blsp_spi10, blsp_uart10, blsp_uim10, NA, NA, NA, NA),
+ PINGROUP(60, blsp_spi10, blsp_uart10, blsp_uim10, NA, NA, NA, NA),
+ PINGROUP(61, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA, NA, NA),
+ PINGROUP(62, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA, NA, NA),
+ PINGROUP(63, blsp_spi11, blsp_uart11, blsp_uim11, NA, NA, NA, NA),
+ PINGROUP(64, blsp_spi11, blsp_uart11, blsp_uim11, NA, NA, NA, NA),
+ PINGROUP(65, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA),
+ PINGROUP(66, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA),
+ PINGROUP(67, sdc3, blsp_spi3_cs1, NA, NA, NA, NA, NA),
+ PINGROUP(68, sdc3, pci_e0, NA, NA, NA, NA, NA),
+ PINGROUP(69, sdc3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, sdc3, pci_e0_n, pci_e0, NA, NA, NA, NA),
+ PINGROUP(71, sdc3, blsp_spi3_cs2, NA, NA, NA, NA, NA),
+ PINGROUP(72, sdc3, blsp_spi3_cs3, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, sd_write, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, pri_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, pri_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, pri_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, pri_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, pri_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, sec_mi2s, sdc4, tsif1, NA, NA, NA, NA),
+ PINGROUP(83, sec_mi2s, sdc4, tsif1, NA, NA, NA, gp_pdm0),
+ PINGROUP(84, sec_mi2s, sdc4, tsif1, NA, NA, NA, gp_pdm1),
+ PINGROUP(85, sec_mi2s, sdc4, tsif1, NA, gp_pdm2, NA, NA),
+ PINGROUP(86, ter_mi2s, sdc4, tsif1, NA, NA, NA, gcc_gp3),
+ PINGROUP(87, ter_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, ter_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, ter_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, ter_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, qua_mi2s, sdc4, tsif2, NA, NA, NA, NA),
+ PINGROUP(92, qua_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, qua_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, qua_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, qua_mi2s, sdc4, tsif2, NA, NA, NA, gcc_gp1),
+ PINGROUP(96, qua_mi2s, sdc4, tsif2, NA, NA, NA, gcc_gp2),
+ PINGROUP(97, qua_mi2s, sdc4, tsif2, NA, gcc_gp3, NA, NA),
+ PINGROUP(98, slimbus, spkr_i2s, NA, NA, NA, NA, NA),
+ PINGROUP(99, slimbus, spkr_i2s, NA, NA, NA, NA, NA),
+ PINGROUP(100, audio_ref, spkr_i2s, NA, NA, NA, NA, NA),
+ PINGROUP(101, sdc4, tsif2, gp_pdm1, NA, NA, NA, NA),
+ PINGROUP(102, uim_batt_alarm, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, edp_hpd, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, spkr_i2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, blsp_spi10_cs1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, gp_pdm2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, blsp_spi10_cs2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, blsp_spi1_cs1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(117, blsp_spi1_cs2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, blsp_spi1_cs3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, cci_timer4, cci_async, sata_devsleep, sata_devsleep_n, NA, NA, NA),
+ PINGROUP(120, cci_async, NA, NA, NA, NA, NA, NA),
+ PINGROUP(121, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(122, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, hdmi_dtest, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, spdif_tx, ldo_en, NA, NA, NA, NA, NA),
+ PINGROUP(125, ldo_update, hdmi_rcv, NA, NA, NA, NA, NA),
+ PINGROUP(126, gcc_vtt, NA, NA, NA, NA, NA, NA),
+ PINGROUP(127, gcc_obt, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, blsp_spi10_cs3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(129, sata_act, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, uim, blsp_spi7, blsp_uart7, blsp_uim7, NA, NA, NA),
+ PINGROUP(131, uim, blsp_spi7, blsp_uart7, blsp_uim7, NA, NA, NA),
+ PINGROUP(132, uim, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA),
+ PINGROUP(133, uim, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA),
+ PINGROUP(134, hsic, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, hsic, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, spdif_tx, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, pci_e1_rst_n, pci_e1_rst, NA, NA, NA, NA, NA),
+ PINGROUP(141, pci_e1_clkreq_n, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, spdif_tx, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(145, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(146, sdc_emmc_mode, NA, NA, NA, NA, NA, NA),
+
+ SDC_PINGROUP(sdc1_clk, 0x2044, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x2044, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x2044, 9, 0),
+ SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x2048, 9, 0),
+};
+
+#define NUM_GPIO_PINGROUPS 147
+
+static const struct msm_pinctrl_soc_data apq8084_pinctrl = {
+ .pins = apq8084_pins,
+ .npins = ARRAY_SIZE(apq8084_pins),
+ .functions = apq8084_functions,
+ .nfunctions = ARRAY_SIZE(apq8084_functions),
+ .groups = apq8084_groups,
+ .ngroups = ARRAY_SIZE(apq8084_groups),
+ .ngpios = NUM_GPIO_PINGROUPS,
+};
+
+static int apq8084_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &apq8084_pinctrl);
+}
+
+static const struct of_device_id apq8084_pinctrl_of_match[] = {
+ { .compatible = "qcom,apq8084-pinctrl", },
+ { },
+};
+
+static struct platform_driver apq8084_pinctrl_driver = {
+ .driver = {
+ .name = "apq8084-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = apq8084_pinctrl_of_match,
+ },
+ .probe = apq8084_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init apq8084_pinctrl_init(void)
+{
+ return platform_driver_register(&apq8084_pinctrl_driver);
+}
+arch_initcall(apq8084_pinctrl_init);
+
+static void __exit apq8084_pinctrl_exit(void)
+{
+ platform_driver_unregister(&apq8084_pinctrl_driver);
+}
+module_exit(apq8084_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm APQ8084 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, apq8084_pinctrl_of_match);
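The apq8084 tables above are pure data: each FUNCTION() entry ties a function name to the gpio groups that can carry it, and each PINGROUP() entry lists up to seven candidate functions for one pin, with the position in that list being the mux value programmed into the pin's control register. The following is a minimal, self-contained sketch (ordinary userspace C with made-up table contents, not the kernel driver) of how a set_mux-style lookup resolves a (function, group) pair against tables shaped like these.

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the msm_function/msm_pingroup tables above. */
struct function {
    const char *name;
    const char * const *groups;
    unsigned ngroups;
};

struct pingroup {
    const char *name;
    const char *funcs[7];   /* candidate functions; mux value = index */
    unsigned nfuncs;
};

static const char * const blsp_uart1_groups[] = {
    "gpio0", "gpio1", "gpio2", "gpio3"
};

static const struct function functions[] = {
    { "blsp_uart1", blsp_uart1_groups, 4 },
};

static const struct pingroup groups[] = {
    { "gpio0", { "blsp_spi1", "blsp_uart1", "blsp_uim1" }, 3 },
    { "gpio1", { "blsp_spi1", "blsp_uart1", "blsp_uim1" }, 3 },
};

/* Return the mux value to program for this group, or -1 if the
 * requested function is not routable on the group. */
static int resolve_mux(unsigned function, unsigned group)
{
    unsigned i;

    for (i = 0; i < groups[group].nfuncs; i++)
        if (!strcmp(groups[group].funcs[i], functions[function].name))
            return i;
    return -1;
}

int main(void)
{
    printf("mux value for blsp_uart1 on gpio0: %d\n", resolve_mux(0, 0));
    return 0;
}

In the real driver the pingroups store function indices rather than strings and the resolved index is then written into the group's ctl_reg; the sketch only models the table walk.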
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index 767cf1120b20..81f49a9b4dbe 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -211,6 +211,7 @@ static const unsigned int sdc3_data_pins[] = { 71 };
.intr_status_bit = 0, \
.intr_ack_high = 1, \
.intr_target_bit = 0, \
+ .intr_target_kpss_val = 4, \
.intr_raw_status_bit = 3, \
.intr_polarity_bit = 1, \
.intr_detection_bit = 2, \
@@ -236,6 +237,7 @@ static const unsigned int sdc3_data_pins[] = { 71 };
.intr_enable_bit = -1, \
.intr_status_bit = -1, \
.intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
.intr_raw_status_bit = -1, \
.intr_polarity_bit = -1, \
.intr_detection_bit = -1, \
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2738108caff2..d30dddd21323 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -26,6 +27,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/reboot.h>
#include "../core.h"
#include "../pinconf.h"
@@ -33,12 +35,14 @@
#include "../pinctrl-utils.h"
#define MAX_NR_GPIO 300
+#define PS_HOLD_OFFSET 0x820
/**
* struct msm_pinctrl - state for a pinctrl-msm device
* @dev: device handle.
* @pctrl: pinctrl handle.
* @chip: gpiochip handle.
+ * @restart_nb: restart notifier block.
* @irq: parent irq for the TLMM irq_chip.
* @lock: Spinlock to protect register resources as well
* as msm_pinctrl data structures.
@@ -52,6 +56,7 @@ struct msm_pinctrl {
struct device *dev;
struct pinctrl_dev *pctrl;
struct gpio_chip chip;
+ struct notifier_block restart_nb;
int irq;
spinlock_t lock;
@@ -130,9 +135,9 @@ static int msm_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int msm_pinmux_enable(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
+static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct msm_pingroup *g;
@@ -166,7 +171,7 @@ static const struct pinmux_ops msm_pinmux_ops = {
.get_functions_count = msm_get_functions_count,
.get_function_name = msm_get_function_name,
.get_function_groups = msm_get_function_groups,
- .enable = msm_pinmux_enable,
+ .set_mux = msm_pinmux_set_mux,
};
static int msm_config_reg(struct msm_pinctrl *pctrl,
@@ -649,8 +654,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
spin_unlock_irqrestore(&pctrl->lock, flags);
}
-#define INTR_TARGET_PROC_APPS 4
-
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -674,7 +677,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
/* Route interrupts to application cpu */
val = readl(pctrl->regs + g->intr_target_reg);
val &= ~(7 << g->intr_target_bit);
- val |= INTR_TARGET_PROC_APPS << g->intr_target_bit;
+ val |= g->intr_target_kpss_val << g->intr_target_bit;
writel(val, pctrl->regs + g->intr_target_reg);
/* Update configuration for gpio.
@@ -829,6 +832,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
if (ret) {
dev_err(pctrl->dev, "Failed to add pin range\n");
+ gpiochip_remove(&pctrl->chip);
return ret;
}
@@ -839,6 +843,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n");
+ gpiochip_remove(&pctrl->chip);
return -ENOSYS;
}
@@ -848,6 +853,32 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return 0;
}
+static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb);
+
+ writel(0, pctrl->regs + PS_HOLD_OFFSET);
+ mdelay(1000);
+ return NOTIFY_DONE;
+}
+
+static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
+{
+ int i = 0;
+ const struct msm_function *func = pctrl->soc->functions;
+
+ for (; i < pctrl->soc->nfunctions; i++)
+ if (!strcmp(func[i].name, "ps_hold")) {
+ pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
+ pctrl->restart_nb.priority = 128;
+ if (register_restart_handler(&pctrl->restart_nb))
+ dev_err(pctrl->dev,
+ "failed to setup restart handler.\n");
+ break;
+ }
+}
+
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data)
{
@@ -871,6 +902,8 @@ int msm_pinctrl_probe(struct platform_device *pdev,
if (IS_ERR(pctrl->regs))
return PTR_ERR(pctrl->regs);
+ msm_pinctrl_setup_pm_reset(pctrl);
+
pctrl->irq = platform_get_irq(pdev, 0);
if (pctrl->irq < 0) {
dev_err(&pdev->dev, "No interrupt defined for msmgpio\n");
@@ -913,6 +946,8 @@ int msm_pinctrl_remove(struct platform_device *pdev)
pinctrl_unregister(pctrl->pctrl);
+ unregister_restart_handler(&pctrl->restart_nb);
+
return 0;
}
EXPORT_SYMBOL(msm_pinctrl_remove);
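msm_pinctrl_setup_pm_reset() above wires the TLMM into the restart path: if the SoC exposes a "ps_hold" function, a notifier block with priority 128 is registered, and on restart msm_ps_hold_restart() clears the PS_HOLD register and waits for the PMIC to cut power. Below is a toy, self-contained model of that notifier pattern (plain C; the chain implementation and names are illustrative, not the kernel's notifier code).

#include <stdio.h>

/* Toy model of a restart-notifier chain: handlers are registered with a
 * priority and invoked in order when a restart is requested. */
struct notifier_block;
typedef int (*notifier_fn)(struct notifier_block *nb, unsigned long action,
                           void *data);

struct notifier_block {
    notifier_fn notifier_call;
    int priority;
    struct notifier_block *next;
};

static struct notifier_block *restart_chain;

static void register_handler(struct notifier_block *nb)
{
    struct notifier_block **pos = &restart_chain;

    /* Keep the chain sorted, highest priority first. */
    while (*pos && (*pos)->priority >= nb->priority)
        pos = &(*pos)->next;
    nb->next = *pos;
    *pos = nb;
}

static void do_restart(void)
{
    struct notifier_block *nb;

    for (nb = restart_chain; nb; nb = nb->next)
        nb->notifier_call(nb, 0, NULL);
}

/* Stand-in for msm_ps_hold_restart(): drop PS_HOLD, then wait for the
 * PMIC to remove power (modelled here as a printout). */
static int ps_hold_restart(struct notifier_block *nb, unsigned long action,
                           void *data)
{
    printf("writing 0 to PS_HOLD, waiting for power-off\n");
    return 0;
}

static struct notifier_block ps_hold_nb = {
    .notifier_call = ps_hold_restart,
    .priority = 128,
};

int main(void)
{
    register_handler(&ps_hold_nb);
    do_restart();
    return 0;
}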
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 7b2a227a590a..b952c4b4a8e9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -53,6 +53,8 @@ struct msm_function {
* @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt
* status.
* @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing.
+ * @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from
+ * this gpio should get routed to the KPSS processor.
* @intr_raw_status_bit: Offset in @intr_cfg_reg for the raw status bit.
* @intr_polarity_bit: Offset in @intr_cfg_reg for specifying polarity of the interrupt.
* @intr_detection_bit: Offset in @intr_cfg_reg for specifying interrupt type.
@@ -88,6 +90,7 @@ struct msm_pingroup {
unsigned intr_ack_high:1;
unsigned intr_target_bit:5;
+ unsigned intr_target_kpss_val:5;
unsigned intr_raw_status_bit:5;
unsigned intr_polarity_bit:5;
unsigned intr_detection_bit:5;
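The new intr_target_kpss_val field replaces the hard-coded INTR_TARGET_PROC_APPS constant because the value that routes a GPIO interrupt to the application processor (KPSS) differs between TLMM revisions, so it now lives in the per-SoC pingroup data. A minimal sketch of the read-modify-write it feeds, mirroring the 3-bit field cleared with the 7 << intr_target_bit mask in msm_gpio_irq_set_type() (standalone C with a fake register; the values are examples only):

#include <stdio.h>
#include <stdint.h>

/* Fake interrupt-target register for one GPIO. */
static uint32_t intr_target_reg;

/* Route the GPIO's interrupt to the processor selected by kpss_val,
 * leaving the other bits of the register untouched. */
static void route_irq_to_kpss(unsigned target_bit, unsigned kpss_val)
{
    uint32_t val = intr_target_reg;

    val &= ~(7u << target_bit);     /* clear the 3-bit target field */
    val |= (uint32_t)kpss_val << target_bit;
    intr_target_reg = val;
}

int main(void)
{
    intr_target_reg = 0xffffffff;
    route_irq_to_kpss(5, 4);        /* e.g. msm8x74: bit 5, value 4 */
    printf("intr_target_reg = 0x%08x\n", intr_target_reg);
    return 0;
}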
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index 35047036a053..2ab21ce5575a 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -384,6 +384,7 @@ static const unsigned int sdc3_data_pins[] = { 157 };
.intr_status_bit = 0, \
.intr_ack_high = 1, \
.intr_target_bit = 0, \
+ .intr_target_kpss_val = 4, \
.intr_raw_status_bit = 3, \
.intr_polarity_bit = 1, \
.intr_detection_bit = 2, \
@@ -409,6 +410,7 @@ static const unsigned int sdc3_data_pins[] = { 157 };
.intr_enable_bit = -1, \
.intr_status_bit = -1, \
.intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
.intr_raw_status_bit = -1, \
.intr_polarity_bit = -1, \
.intr_detection_bit = -1, \
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 8c9720154d1e..3c858384d041 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -366,6 +366,7 @@ static const unsigned int sdc2_data_pins[] = { 151 };
.intr_enable_bit = 0, \
.intr_status_bit = 0, \
.intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
.intr_raw_status_bit = 4, \
.intr_polarity_bit = 1, \
.intr_detection_bit = 2, \
@@ -391,6 +392,7 @@ static const unsigned int sdc2_data_pins[] = { 151 };
.intr_enable_bit = -1, \
.intr_status_bit = -1, \
.intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
.intr_raw_status_bit = -1, \
.intr_polarity_bit = -1, \
.intr_detection_bit = -1, \
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index 603da2f9dd95..b995ec2c5d16 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -364,8 +364,9 @@ static void exynos5440_pinmux_setup(struct pinctrl_dev *pctldev, unsigned select
}
/* enable a specified pinmux by writing to registers */
-static int exynos5440_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int exynos5440_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned group)
{
exynos5440_pinmux_setup(pctldev, selector, group, true);
return 0;
@@ -387,7 +388,7 @@ static const struct pinmux_ops exynos5440_pinmux_ops = {
.get_functions_count = exynos5440_get_functions_count,
.get_function_name = exynos5440_pinmux_get_fname,
.get_function_groups = exynos5440_pinmux_get_groups,
- .enable = exynos5440_pinmux_enable,
+ .set_mux = exynos5440_pinmux_set_mux,
.gpio_set_direction = exynos5440_pinmux_gpio_set_direction,
};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index b07406da333c..4a47691c32b1 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -401,8 +401,9 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
}
/* enable a specified pinmux by writing to registers */
-static int samsung_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int samsung_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned group)
{
samsung_pinmux_setup(pctldev, selector, group, true);
return 0;
@@ -413,7 +414,7 @@ static const struct pinmux_ops samsung_pinmux_ops = {
.get_functions_count = samsung_get_functions_count,
.get_function_name = samsung_pinmux_get_fname,
.get_function_groups = samsung_pinmux_get_groups,
- .enable = samsung_pinmux_enable,
+ .set_mux = samsung_pinmux_set_mux,
};
/* set or get the pin config settings for a specified pin */
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index b9b464d0578c..6572c233f73d 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -542,7 +542,7 @@ static int sh_pfc_probe(struct platform_device *pdev)
*/
ret = sh_pfc_register_pinctrl(pfc);
if (unlikely(ret != 0))
- goto error;
+ return ret;
#ifdef CONFIG_GPIO_SH_PFC
/*
@@ -564,11 +564,6 @@ static int sh_pfc_probe(struct platform_device *pdev)
dev_info(pfc->dev, "%s support registered\n", info->name);
return 0;
-
-error:
- if (info->ops && info->ops->exit)
- info->ops->exit(pfc);
- return ret;
}
static int sh_pfc_remove(struct platform_device *pdev)
@@ -580,9 +575,6 @@ static int sh_pfc_remove(struct platform_device *pdev)
#endif
sh_pfc_unregister_pinctrl(pfc);
- if (pfc->info->ops && pfc->info->ops->exit)
- pfc->info->ops->exit(pfc);
-
return 0;
}
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index b7b0e6ccf305..3daaa5241c47 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -33,7 +33,6 @@ struct sh_pfc_pin_range {
struct sh_pfc {
struct device *dev;
const struct sh_pfc_soc_info *info;
- void *soc_data;
spinlock_t lock;
unsigned int num_windows;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index ce9fb7aa8ba3..280a56f97786 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -2717,14 +2717,14 @@ static void r8a73a4_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
iowrite8(value, addr);
}
-static const struct sh_pfc_soc_operations r8a73a4_pinmux_ops = {
+static const struct sh_pfc_soc_operations r8a73a4_pfc_ops = {
.get_bias = r8a73a4_pinmux_get_bias,
.set_bias = r8a73a4_pinmux_set_bias,
};
const struct sh_pfc_soc_info r8a73a4_pinmux_info = {
.name = "r8a73a4_pfc",
- .ops = &r8a73a4_pinmux_ops,
+ .ops = &r8a73a4_pfc_ops,
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index e4c1ef477053..b486e9d20cc2 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -3752,14 +3752,14 @@ static void r8a7740_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
iowrite8(value, addr);
}
-static const struct sh_pfc_soc_operations r8a7740_pinmux_ops = {
+static const struct sh_pfc_soc_operations r8a7740_pfc_ops = {
.get_bias = r8a7740_pinmux_get_bias,
.set_bias = r8a7740_pinmux_set_bias,
};
const struct sh_pfc_soc_info r8a7740_pinmux_info = {
.name = "r8a7740_pfc",
- .ops = &r8a7740_pinmux_ops,
+ .ops = &r8a7740_pfc_ops,
.input = { PINMUX_INPUT_BEGIN,
PINMUX_INPUT_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
index d9158b3b2919..8211f66a2f68 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -2614,14 +2614,14 @@ static void sh7372_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
iowrite8(value, addr);
}
-static const struct sh_pfc_soc_operations sh7372_pinmux_ops = {
+static const struct sh_pfc_soc_operations sh7372_pfc_ops = {
.get_bias = sh7372_pinmux_get_bias,
.set_bias = sh7372_pinmux_set_bias,
};
const struct sh_pfc_soc_info sh7372_pinmux_info = {
.name = "sh7372_pfc",
- .ops = &sh7372_pinmux_ops,
+ .ops = &sh7372_pfc_ops,
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 0bd8f4401b42..d2efbfb776ac 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3824,39 +3824,28 @@ static void sh73a0_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
* SoC information
*/
-struct sh73a0_pinmux_data {
- struct regulator_dev *vccq_mc0;
-};
-
static int sh73a0_pinmux_soc_init(struct sh_pfc *pfc)
{
- struct sh73a0_pinmux_data *data;
struct regulator_config cfg = { };
+ struct regulator_dev *vccq;
int ret;
- data = devm_kzalloc(pfc->dev, sizeof(*data), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
cfg.dev = pfc->dev;
cfg.init_data = &sh73a0_vccq_mc0_init_data;
cfg.driver_data = pfc;
- data->vccq_mc0 = devm_regulator_register(pfc->dev,
- &sh73a0_vccq_mc0_desc, &cfg);
- if (IS_ERR(data->vccq_mc0)) {
- ret = PTR_ERR(data->vccq_mc0);
+ vccq = devm_regulator_register(pfc->dev, &sh73a0_vccq_mc0_desc, &cfg);
+ if (IS_ERR(vccq)) {
+ ret = PTR_ERR(vccq);
dev_err(pfc->dev, "Failed to register VCCQ MC0 regulator: %d\n",
ret);
return ret;
}
- pfc->soc_data = data;
-
return 0;
}
-static const struct sh_pfc_soc_operations sh73a0_pinmux_ops = {
+static const struct sh_pfc_soc_operations sh73a0_pfc_ops = {
.init = sh73a0_pinmux_soc_init,
.get_bias = sh73a0_pinmux_get_bias,
.set_bias = sh73a0_pinmux_set_bias,
@@ -3864,7 +3853,7 @@ static const struct sh_pfc_soc_operations sh73a0_pinmux_ops = {
const struct sh_pfc_soc_info sh73a0_pinmux_info = {
.name = "sh73a0_pfc",
- .ops = &sh73a0_pinmux_ops,
+ .ops = &sh73a0_pfc_ops,
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 11db3ee39d40..910deaefa0ac 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -312,8 +312,8 @@ static int sh_pfc_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int sh_pfc_func_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int sh_pfc_func_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
struct sh_pfc *pfc = pmx->pfc;
@@ -442,7 +442,7 @@ static const struct pinmux_ops sh_pfc_pinmux_ops = {
.get_functions_count = sh_pfc_get_functions_count,
.get_function_name = sh_pfc_get_function_name,
.get_function_groups = sh_pfc_get_function_groups,
- .enable = sh_pfc_func_enable,
+ .set_mux = sh_pfc_func_set_mux,
.gpio_request_enable = sh_pfc_gpio_request_enable,
.gpio_disable_free = sh_pfc_gpio_disable_free,
.gpio_set_direction = sh_pfc_gpio_set_direction,
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index d482c40b012a..5b7283182c1e 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -116,7 +116,6 @@ struct sh_pfc;
struct sh_pfc_soc_operations {
int (*init)(struct sh_pfc *pfc);
- void (*exit)(struct sh_pfc *pfc);
unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin);
void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias);
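The sh73a0 change above drops the driver-private soc_data pointer and the ->exit() hook because the only resource they managed, the VCCQ MC0 regulator, is now obtained through devm_regulator_register() and is released automatically when the device is unbound. A toy model of that managed-resource pattern (standalone C; devm_register() and device_release() here are illustrative stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Toy model of devm-style resource management: actions registered against a
 * device are released automatically when the device goes away, so drivers no
 * longer need a dedicated exit hook for such resources. */
struct devres {
    void (*release)(void *data);
    void *data;
    struct devres *next;
};

struct device {
    const char *name;
    struct devres *resources;
};

static void *devm_register(struct device *dev, size_t size,
                           void (*release)(void *))
{
    struct devres *dr = malloc(sizeof(*dr));

    dr->data = calloc(1, size);
    dr->release = release;
    dr->next = dev->resources;
    dev->resources = dr;
    return dr->data;
}

static void device_release(struct device *dev)
{
    struct devres *dr = dev->resources;

    while (dr) {
        struct devres *next = dr->next;

        dr->release(dr->data);
        free(dr->data);
        free(dr);
        dr = next;
    }
    dev->resources = NULL;
}

static void regulator_release(void *data)
{
    printf("unregistering regulator\n");
}

int main(void)
{
    struct device pfc = { .name = "sh-pfc" };

    /* Probe path: register the regulator through the managed helper ... */
    devm_register(&pfc, 16, regulator_release);
    /* ... remove path: no explicit exit hook, teardown happens here. */
    device_release(&pfc);
    return 0;
}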
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index c4dd3d5cf9c3..45f8391ddb34 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -134,8 +134,9 @@ static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
.mask = BIT(30) | BIT(31),
}, {
.group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) |
+ .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) |
+ BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) |
+ BIT(16) | BIT(17) | BIT(18) | BIT(19) |
BIT(20) | BIT(21) | BIT(22) | BIT(31),
},
};
@@ -148,14 +149,15 @@ static const struct sirfsoc_padmux lcd_16bits_padmux = {
.funcval = 0,
};
-static const unsigned lcd_16bits_pins[] = { 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83,
- 84, 85, 86, 95 };
+static const unsigned lcd_16bits_pins[] = { 62, 63, 65, 70, 71, 72, 73, 74, 75,
+ 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95 };
static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
{
.group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) |
+ .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) |
+ BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) |
+ BIT(16) | BIT(17) | BIT(18) | BIT(19) |
BIT(20) | BIT(21) | BIT(22) | BIT(31),
}, {
.group = 1,
@@ -174,21 +176,23 @@ static const struct sirfsoc_padmux lcd_18bits_padmux = {
.funcval = 0,
};
-static const unsigned lcd_18bits_pins[] = { 16, 17, 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83,
- 84, 85, 86, 95 };
+static const unsigned lcd_18bits_pins[] = { 16, 17, 62, 63, 65, 70, 71, 72, 73,
+ 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95 };
static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
{
.group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) |
+ .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) |
+ BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) |
+ BIT(16) | BIT(17) | BIT(18) | BIT(19) |
BIT(20) | BIT(21) | BIT(22) | BIT(31),
}, {
.group = 1,
.mask = BIT(30) | BIT(31),
}, {
.group = 0,
- .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
+ .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) |
+ BIT(21) | BIT(22) | BIT(23),
},
};
@@ -200,14 +204,16 @@ static const struct sirfsoc_padmux lcd_24bits_padmux = {
.funcval = 0,
};
-static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79,
- 80, 81, 82, 83, 84, 85, 86, 95};
+static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 62,
+ 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84,
+ 85, 86, 95};
static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
{
.group = 2,
- .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
- BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) |
+ .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) |
+ BIT(11) | BIT(12) | BIT(13) | BIT(15) | BIT(16) |
+ BIT(17) | BIT(18) | BIT(19) |
BIT(20) | BIT(21) | BIT(22) | BIT(31),
}, {
.group = 1,
@@ -226,8 +232,8 @@ static const struct sirfsoc_padmux lcdrom_padmux = {
.funcval = BIT(4),
};
-static const unsigned lcdrom_pins[] = { 8, 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83,
- 84, 85, 86, 95};
+static const unsigned lcdrom_pins[] = { 8, 62, 63, 65, 70, 71, 72, 73, 74, 75,
+ 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95};
static const struct sirfsoc_muxmask uart0_muxmask[] = {
{
@@ -371,11 +377,42 @@ static const struct sirfsoc_padmux cko1_padmux = {
static const unsigned cko1_pins[] = { 42 };
-static const struct sirfsoc_muxmask i2s_muxmask[] = {
+static const struct sirfsoc_muxmask i2s_mclk_muxmask[] = {
{
.group = 1,
.mask = BIT(10),
- }, {
+ },
+};
+
+static const struct sirfsoc_padmux i2s_mclk_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2s_mclk_muxmask),
+ .muxmask = i2s_mclk_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(3),
+ .funcval = BIT(3),
+};
+
+static const unsigned i2s_mclk_pins[] = { 42 };
+
+static const struct sirfsoc_muxmask i2s_ext_clk_input_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19),
+ },
+};
+
+static const struct sirfsoc_padmux i2s_ext_clk_input_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2s_ext_clk_input_muxmask),
+ .muxmask = i2s_ext_clk_input_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(2),
+ .funcval = BIT(2),
+};
+
+static const unsigned i2s_ext_clk_input_pins[] = { 51 };
+
+static const struct sirfsoc_muxmask i2s_muxmask[] = {
+ {
.group = 3,
.mask = BIT(2) | BIT(3) | BIT(4) | BIT(5),
},
@@ -385,17 +422,12 @@ static const struct sirfsoc_padmux i2s_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_muxmask),
.muxmask = i2s_muxmask,
.ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(3),
- .funcval = BIT(3),
};
-static const unsigned i2s_pins[] = { 42, 98, 99, 100, 101 };
+static const unsigned i2s_pins[] = { 98, 99, 100, 101 };
static const struct sirfsoc_muxmask i2s_no_din_muxmask[] = {
{
- .group = 1,
- .mask = BIT(10),
- }, {
.group = 3,
.mask = BIT(2) | BIT(3) | BIT(4),
},
@@ -405,17 +437,12 @@ static const struct sirfsoc_padmux i2s_no_din_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_no_din_muxmask),
.muxmask = i2s_no_din_muxmask,
.ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(3),
- .funcval = BIT(3),
};
-static const unsigned i2s_no_din_pins[] = { 42, 98, 99, 100 };
+static const unsigned i2s_no_din_pins[] = { 98, 99, 100 };
static const struct sirfsoc_muxmask i2s_6chn_muxmask[] = {
{
- .group = 1,
- .mask = BIT(10) | BIT(20) | BIT(23),
- }, {
.group = 3,
.mask = BIT(2) | BIT(3) | BIT(4) | BIT(5),
},
@@ -425,11 +452,11 @@ static const struct sirfsoc_padmux i2s_6chn_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_6chn_muxmask),
.muxmask = i2s_6chn_muxmask,
.ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(1) | BIT(3) | BIT(9),
- .funcval = BIT(1) | BIT(3) | BIT(9),
+ .funcmask = BIT(1) | BIT(9),
+ .funcval = BIT(1) | BIT(9),
};
-static const unsigned i2s_6chn_pins[] = { 42, 52, 55, 98, 99, 100, 101 };
+static const unsigned i2s_6chn_pins[] = { 52, 55, 98, 99, 100, 101 };
static const struct sirfsoc_muxmask ac97_muxmask[] = {
{
@@ -716,7 +743,8 @@ static const struct sirfsoc_padmux vip_padmux = {
.funcval = BIT(18),
};
-static const unsigned vip_pins[] = { 36, 37, 38, 40, 41, 56, 57, 58, 59, 60, 61 };
+static const unsigned vip_pins[] = { 36, 37, 38, 40, 41, 56, 57, 58, 59,
+ 60, 61 };
static const struct sirfsoc_muxmask vip_noupli_muxmask[] = {
{
@@ -737,7 +765,8 @@ static const struct sirfsoc_padmux vip_noupli_padmux = {
.funcval = BIT(15),
};
-static const unsigned vip_noupli_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 87, 88, 89 };
+static const unsigned vip_noupli_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23,
+ 87, 88, 89 };
static const struct sirfsoc_muxmask i2c0_muxmask[] = {
{
@@ -876,7 +905,8 @@ static const struct sirfsoc_padmux usb0_upli_drvbus_padmux = {
.funcval = 0,
};
-static const unsigned usb0_upli_drvbus_pins[] = { 36, 37, 38, 39, 40, 41, 56, 57, 58, 59, 60, 61 };
+static const unsigned usb0_upli_drvbus_pins[] = { 36, 37, 38, 39, 40,
+ 41, 56, 57, 58, 59, 60, 61 };
static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
{
@@ -968,6 +998,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins),
SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins),
SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
+ SIRFSOC_PIN_GROUP("i2smclkgrp", i2s_mclk_pins),
+ SIRFSOC_PIN_GROUP("i2s_ext_clk_inputgrp", i2s_ext_clk_input_pins),
SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
SIRFSOC_PIN_GROUP("i2s_no_dingrp", i2s_no_din_pins),
SIRFSOC_PIN_GROUP("i2s_6chngrp", i2s_6chn_pins),
@@ -1017,8 +1049,11 @@ static const char * const sdmmc2_nowpgrp[] = { "sdmmc2_nowpgrp" };
static const char * const usb0_upli_drvbusgrp[] = { "usb0_upli_drvbusgrp" };
static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" };
-static const char * const uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
+static const char * const
+ uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
static const char * const pulse_countgrp[] = { "pulse_countgrp" };
+static const char * const i2smclkgrp[] = { "i2smclkgrp" };
+static const char * const i2s_ext_clk_inputgrp[] = { "i2s_ext_clk_inputgrp" };
static const char * const i2sgrp[] = { "i2sgrp" };
static const char * const i2s_no_dingrp[] = { "i2s_no_dingrp" };
static const char * const i2s_6chngrp[] = { "i2s_6chngrp" };
@@ -1038,7 +1073,8 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
uart0_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
- SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
+ SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl",
+ uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
usp0_uart_nostreamctrl_grp,
@@ -1068,12 +1104,19 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux),
SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux),
SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
- SIRFSOC_PMX_FUNCTION("sdmmc2_nowp", sdmmc2_nowpgrp, sdmmc2_nowp_padmux),
- SIRFSOC_PMX_FUNCTION("usb0_upli_drvbus", usb0_upli_drvbusgrp, usb0_upli_drvbus_padmux),
- SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("sdmmc2_nowp",
+ sdmmc2_nowpgrp, sdmmc2_nowp_padmux),
+ SIRFSOC_PMX_FUNCTION("usb0_upli_drvbus",
+ usb0_upli_drvbusgrp, usb0_upli_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus",
+ usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux),
- SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
+ SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1",
+ uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
+ SIRFSOC_PMX_FUNCTION("i2s_mclk", i2smclkgrp, i2s_mclk_padmux),
+ SIRFSOC_PMX_FUNCTION("i2s_ext_clk_input", i2s_ext_clk_inputgrp,
+ i2s_ext_clk_input_padmux),
SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
SIRFSOC_PMX_FUNCTION("i2s_no_din", i2s_no_dingrp, i2s_no_din_padmux),
SIRFSOC_PMX_FUNCTION("i2s_6chn", i2s_6chngrp, i2s_6chn_padmux),
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 8aa76f0776d7..357678ee28e3 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -135,8 +135,9 @@ static const struct pinctrl_pin_desc sirfsoc_pads[] = {
static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
{
.group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
- BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
BIT(17) | BIT(18),
}, {
.group = 2,
@@ -152,14 +153,15 @@ static const struct sirfsoc_padmux lcd_16bits_padmux = {
.funcval = 0,
};
-static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
{
.group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
- BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
BIT(17) | BIT(18),
}, {
.group = 2,
@@ -178,21 +180,23 @@ static const struct sirfsoc_padmux lcd_18bits_padmux = {
.funcval = 0,
};
-static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110, 111, 112, 113, 114};
+static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114};
static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
{
.group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
- BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
BIT(17) | BIT(18),
}, {
.group = 2,
.mask = BIT(31),
}, {
.group = 0,
- .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
+ .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) |
+ BIT(21) | BIT(22) | BIT(23),
},
};
@@ -204,14 +208,16 @@ static const struct sirfsoc_padmux lcd_24bits_padmux = {
.funcval = 0,
};
-static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114 };
static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
{
.group = 3,
- .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
- BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) |
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
BIT(17) | BIT(18),
}, {
.group = 2,
@@ -230,8 +236,8 @@ static const struct sirfsoc_padmux lcdrom_padmux = {
.funcval = BIT(4),
};
-static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
static const struct sirfsoc_muxmask uart0_muxmask[] = {
{
@@ -380,12 +386,44 @@ static const struct sirfsoc_padmux cko1_padmux = {
static const unsigned cko1_pins[] = { 42 };
+static const struct sirfsoc_muxmask i2s_mclk_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(10),
+ },
+};
+
+static const struct sirfsoc_padmux i2s_mclk_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2s_mclk_muxmask),
+ .muxmask = i2s_mclk_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(3),
+ .funcval = BIT(3),
+};
+
+static const unsigned i2s_mclk_pins[] = { 42 };
+
+static const struct sirfsoc_muxmask i2s_ext_clk_input_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19),
+ },
+};
+
+static const struct sirfsoc_padmux i2s_ext_clk_input_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2s_ext_clk_input_muxmask),
+ .muxmask = i2s_ext_clk_input_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(2),
+ .funcval = BIT(2),
+};
+
+static const unsigned i2s_ext_clk_input_pins[] = { 51 };
+
static const struct sirfsoc_muxmask i2s_muxmask[] = {
{
.group = 1,
- .mask =
- BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(19)
- | BIT(23) | BIT(28),
+ .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
},
};
@@ -393,11 +431,42 @@ static const struct sirfsoc_padmux i2s_padmux = {
.muxmask_counts = ARRAY_SIZE(i2s_muxmask),
.muxmask = i2s_muxmask,
.ctrlreg = SIRFSOC_RSC_PIN_MUX,
- .funcmask = BIT(3) | BIT(9),
- .funcval = BIT(3),
};
-static const unsigned i2s_pins[] = { 42, 43, 44, 45, 46, 51, 55, 60 };
+static const unsigned i2s_pins[] = { 43, 44, 45, 46 };
+
+static const struct sirfsoc_muxmask i2s_no_din_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(11) | BIT(12) | BIT(14),
+ },
+};
+
+static const struct sirfsoc_padmux i2s_no_din_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2s_no_din_muxmask),
+ .muxmask = i2s_no_din_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+};
+
+static const unsigned i2s_no_din_pins[] = { 43, 44, 46 };
+
+static const struct sirfsoc_muxmask i2s_6chn_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14)
+ | BIT(23) | BIT(28),
+ },
+};
+
+static const struct sirfsoc_padmux i2s_6chn_padmux = {
+ .muxmask_counts = ARRAY_SIZE(i2s_6chn_muxmask),
+ .muxmask = i2s_6chn_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(1) | BIT(9),
+ .funcval = BIT(1) | BIT(9),
+};
+
+static const unsigned i2s_6chn_pins[] = { 43, 44, 45, 46, 55, 60 };
static const struct sirfsoc_muxmask ac97_muxmask[] = {
{
@@ -685,7 +754,8 @@ static const struct sirfsoc_padmux vip_padmux = {
.funcval = 0,
};
-static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
+static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89 };
static const struct sirfsoc_muxmask i2c0_muxmask[] = {
{
@@ -735,7 +805,8 @@ static const struct sirfsoc_padmux viprom_padmux = {
.funcval = BIT(0),
};
-static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
+static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89 };
static const struct sirfsoc_muxmask pwm0_muxmask[] = {
{
@@ -918,7 +989,11 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins),
SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins),
SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
+ SIRFSOC_PIN_GROUP("i2smclkgrp", i2s_mclk_pins),
+ SIRFSOC_PIN_GROUP("i2s_ext_clk_inputgrp", i2s_ext_clk_input_pins),
SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
+ SIRFSOC_PIN_GROUP("i2s_no_dingrp", i2s_no_din_pins),
+ SIRFSOC_PIN_GROUP("i2s_6chngrp", i2s_6chn_pins),
SIRFSOC_PIN_GROUP("ac97grp", ac97_pins),
SIRFSOC_PIN_GROUP("nandgrp", nand_pins),
SIRFSOC_PIN_GROUP("spi0grp", spi0_pins),
@@ -936,16 +1011,19 @@ static const char * const uart1grp[] = { "uart1grp" };
static const char * const uart2grp[] = { "uart2grp" };
static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
static const char * const usp0grp[] = { "usp0grp" };
-static const char * const usp0_uart_nostreamctrl_grp[] =
- { "usp0_uart_nostreamctrl_grp" };
+static const char * const usp0_uart_nostreamctrl_grp[] = {
+ "usp0_uart_nostreamctrl_grp"
+};
static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" };
static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" };
static const char * const usp1grp[] = { "usp1grp" };
-static const char * const usp1_uart_nostreamctrl_grp[] =
- { "usp1_uart_nostreamctrl_grp" };
+static const char * const usp1_uart_nostreamctrl_grp[] = {
+ "usp1_uart_nostreamctrl_grp"
+};
static const char * const usp2grp[] = { "usp2grp" };
-static const char * const usp2_uart_nostreamctrl_grp[] =
- { "usp2_uart_nostreamctrl_grp" };
+static const char * const usp2_uart_nostreamctrl_grp[] = {
+ "usp2_uart_nostreamctrl_grp"
+};
static const char * const i2c0grp[] = { "i2c0grp" };
static const char * const i2c1grp[] = { "i2c1grp" };
static const char * const pwm0grp[] = { "pwm0grp" };
@@ -966,9 +1044,14 @@ static const char * const sdmmc5grp[] = { "sdmmc5grp" };
static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" };
static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" };
-static const char * const uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
+static const char * const
+ uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" };
static const char * const pulse_countgrp[] = { "pulse_countgrp" };
+static const char * const i2smclkgrp[] = { "i2smclkgrp" };
+static const char * const i2s_ext_clk_inputgrp[] = { "i2s_ext_clk_inputgrp" };
static const char * const i2sgrp[] = { "i2sgrp" };
+static const char * const i2s_no_dingrp[] = { "i2s_no_dingrp" };
+static const char * const i2s_6chngrp[] = { "i2s_6chngrp" };
static const char * const ac97grp[] = { "ac97grp" };
static const char * const nandgrp[] = { "nandgrp" };
static const char * const spi0grp[] = { "spi0grp" };
@@ -981,15 +1064,19 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
- SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl", uart0_nostreamctrlgrp, uart0_nostreamctrl_padmux),
+ SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl",
+ uart0_nostreamctrlgrp, uart0_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
- SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
+ SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl",
+ uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
usp0_uart_nostreamctrl_grp, usp0_uart_nostreamctrl_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_only_utfs", usp0_only_utfs_grp, usp0_only_utfs_padmux),
- SIRFSOC_PMX_FUNCTION("usp0_only_urfs", usp0_only_urfs_grp, usp0_only_urfs_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_utfs",
+ usp0_only_utfs_grp, usp0_only_utfs_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_urfs",
+ usp0_only_urfs_grp, usp0_only_urfs_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
usp1_uart_nostreamctrl_grp, usp1_uart_nostreamctrl_padmux),
@@ -1013,12 +1100,20 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux),
SIRFSOC_PMX_FUNCTION("sdmmc4", sdmmc4grp, sdmmc4_padmux),
SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
- SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux),
- SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus",
+ usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux),
+ SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus",
+ usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux),
- SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
+ SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1",
+ uart1_route_io_usb1grp, uart1_route_io_usb1_padmux),
SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
+ SIRFSOC_PMX_FUNCTION("i2s_mclk", i2smclkgrp, i2s_mclk_padmux),
+ SIRFSOC_PMX_FUNCTION("i2s_ext_clk_input", i2s_ext_clk_inputgrp,
+ i2s_ext_clk_input_padmux),
SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
+ SIRFSOC_PMX_FUNCTION("i2s_no_din", i2s_no_dingrp, i2s_no_din_padmux),
+ SIRFSOC_PMX_FUNCTION("i2s_6chn", i2s_6chngrp, i2s_6chn_padmux),
SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux),
SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux),
SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux),
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 4c1d7c68666d..b713bd59ffbb 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -58,17 +58,18 @@ static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev,
return sirfsoc_pin_groups[selector].name;
}
-static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- const unsigned **pins,
- unsigned *num_pins)
+static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
{
*pins = sirfsoc_pin_groups[selector].pins;
*num_pins = sirfsoc_pin_groups[selector].num_pins;
return 0;
}
-static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset)
+static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned offset)
{
seq_printf(s, " " DRIVER_NAME);
}
@@ -138,22 +139,25 @@ static struct pinctrl_ops sirfsoc_pctrl_ops = {
static struct sirfsoc_pmx_func *sirfsoc_pmx_functions;
static int sirfsoc_pmxfunc_cnt;
-static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector,
- bool enable)
+static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx,
+ unsigned selector, bool enable)
{
int i;
- const struct sirfsoc_padmux *mux = sirfsoc_pmx_functions[selector].padmux;
+ const struct sirfsoc_padmux *mux =
+ sirfsoc_pmx_functions[selector].padmux;
const struct sirfsoc_muxmask *mask = mux->muxmask;
for (i = 0; i < mux->muxmask_counts; i++) {
u32 muxval;
if (!spmx->is_marco) {
- muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group));
+ muxval = readl(spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(mask[i].group));
if (enable)
muxval = muxval & ~mask[i].mask;
else
muxval = muxval | mask[i].mask;
- writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group));
+ writel(muxval, spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(mask[i].group));
} else {
if (enable)
writel(mask[i].mask, spmx->gpio_virtbase +
@@ -175,8 +179,9 @@ static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector
}
}
-static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, unsigned selector,
- unsigned group)
+static int sirfsoc_pinmux_set_mux(struct pinctrl_dev *pmxdev,
+ unsigned selector,
+ unsigned group)
{
struct sirfsoc_pmx *spmx;
@@ -197,9 +202,10 @@ static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev,
return sirfsoc_pmx_functions[selector].name;
}
-static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
+static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
{
*groups = sirfsoc_pmx_functions[selector].groups;
*num_groups = sirfsoc_pmx_functions[selector].num_groups;
@@ -218,9 +224,11 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
spmx = pinctrl_dev_get_drvdata(pmxdev);
if (!spmx->is_marco) {
- muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
+ muxval = readl(spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(group));
muxval = muxval | (1 << (offset - range->pin_base));
- writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
+ writel(muxval, spmx->gpio_virtbase +
+ SIRFSOC_GPIO_PAD_EN(group));
} else {
writel(1 << (offset - range->pin_base), spmx->gpio_virtbase +
SIRFSOC_GPIO_PAD_EN(group));
@@ -230,7 +238,7 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
}
static struct pinmux_ops sirfsoc_pinmux_ops = {
- .enable = sirfsoc_pinmux_enable,
+ .set_mux = sirfsoc_pinmux_set_mux,
.get_functions_count = sirfsoc_pinmux_get_funcs_count,
.get_function_name = sirfsoc_pinmux_get_func_name,
.get_function_groups = sirfsoc_pinmux_get_groups,
@@ -518,24 +526,29 @@ static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
case IRQ_TYPE_NONE:
break;
case IRQ_TYPE_EDGE_RISING:
- val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK |
+ SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
val &= ~SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
break;
case IRQ_TYPE_EDGE_FALLING:
val &= ~SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
- val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
+ SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
break;
case IRQ_TYPE_EDGE_BOTH:
- val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
- SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK |
+ SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
+ SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
break;
case IRQ_TYPE_LEVEL_LOW:
- val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
+ val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK |
+ SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
break;
case IRQ_TYPE_LEVEL_HIGH:
val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
- val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
+ val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
+ SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
break;
}
@@ -694,7 +707,8 @@ static inline void sirfsoc_gpio_set_output(struct sirfsoc_gpio_chip *sgpio,
spin_unlock_irqrestore(&bank->lock, flags);
}
-static int sirfsoc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value)
+static int sirfsoc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
{
struct sirfsoc_gpio_chip *sgpio = to_sirfsoc_gpio(chip);
struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, gpio);
@@ -839,7 +853,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
if (err) {
dev_err(&pdev->dev,
"could not connect irqchip to gpiochip\n");
- goto out;
+ goto out_banks;
}
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
@@ -898,8 +912,8 @@ static int __init sirfsoc_gpio_init(void)
}
subsys_initcall(sirfsoc_gpio_init);
-MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
- "Yuping Luo <yuping.luo@csr.com>, "
- "Barry Song <baohua.song@csr.com>");
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
+MODULE_AUTHOR("Yuping Luo <yuping.luo@csr.com>");
+MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC pin control driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index f72cc4e192bd..abdb05ac43dc 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -268,7 +268,7 @@ static int spear_pinctrl_endisable(struct pinctrl_dev *pctldev,
return 0;
}
-static int spear_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
+static int spear_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned function,
unsigned group)
{
return spear_pinctrl_endisable(pctldev, function, group, true);
@@ -338,7 +338,7 @@ static const struct pinmux_ops spear_pinmux_ops = {
.get_functions_count = spear_pinctrl_get_funcs_count,
.get_function_name = spear_pinctrl_get_func_name,
.get_function_groups = spear_pinctrl_get_func_groups,
- .enable = spear_pinctrl_enable,
+ .set_mux = spear_pinctrl_set_mux,
.gpio_request_enable = gpio_request_enable,
.gpio_disable_free = gpio_disable_free,
};
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index 1a8bbfec60ca..6d57d43ab640 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2692,7 +2692,7 @@ static struct spear_pinctrl_machdata spear1310_machdata = {
.modes_supported = false,
};
-static struct of_device_id spear1310_pinctrl_of_match[] = {
+static const struct of_device_id spear1310_pinctrl_of_match[] = {
{
.compatible = "st,spear1310-pinmux",
},
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index 873966e2b99f..d243e43e7f6d 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2008,7 +2008,7 @@ static struct spear_pinctrl_machdata spear1340_machdata = {
.modes_supported = false,
};
-static struct of_device_id spear1340_pinctrl_of_match[] = {
+static const struct of_device_id spear1340_pinctrl_of_match[] = {
{
.compatible = "st,spear1340-pinmux",
},
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 4777c0d0e730..9db83e9ee18c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -646,7 +646,7 @@ static struct spear_function *spear300_functions[] = {
&gpio1_function,
};
-static struct of_device_id spear300_pinctrl_of_match[] = {
+static const struct of_device_id spear300_pinctrl_of_match[] = {
{
.compatible = "st,spear300-pinmux",
},
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index ed1d3608f486..db775a414b7a 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -371,7 +371,7 @@ static struct spear_function *spear310_functions[] = {
&tdm_function,
};
-static struct of_device_id spear310_pinctrl_of_match[] = {
+static const struct of_device_id spear310_pinctrl_of_match[] = {
{
.compatible = "st,spear310-pinmux",
},
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index b8e290a8c8c9..80fbd68e17bc 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -3410,7 +3410,7 @@ static struct spear_function *spear320_functions[] = {
&i2c2_function,
};
-static struct of_device_id spear320_pinctrl_of_match[] = {
+static const struct of_device_id spear320_pinctrl_of_match[] = {
{
.compatible = "st,spear320-pinmux",
},
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 3df66e366c87..ef9d804e55de 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -393,9 +393,9 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
spin_unlock_irqrestore(&pctl->lock, flags);
}
-static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
+static int sunxi_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct sunxi_pinctrl_group *g = pctl->groups + group;
@@ -441,7 +441,7 @@ static const struct pinmux_ops sunxi_pmx_ops = {
.get_functions_count = sunxi_pmx_get_funcs_cnt,
.get_function_name = sunxi_pmx_get_func_name,
.get_function_groups = sunxi_pmx_get_func_groups,
- .enable = sunxi_pmx_enable,
+ .set_mux = sunxi_pmx_set_mux,
.gpio_set_direction = sunxi_pmx_gpio_set_direction,
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 8cea355f9a81..d055d63309e4 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -131,9 +131,9 @@ static int wmt_set_pinmux(struct wmt_pinctrl_data *data, unsigned func,
return 0;
}
-static int wmt_pmx_enable(struct pinctrl_dev *pctldev,
- unsigned func_selector,
- unsigned group_selector)
+static int wmt_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned func_selector,
+ unsigned group_selector)
{
struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
u32 pinnum = data->pins[group_selector].number;
@@ -168,7 +168,7 @@ static struct pinmux_ops wmt_pinmux_ops = {
.get_functions_count = wmt_pmx_get_functions_count,
.get_function_name = wmt_pmx_get_function_name,
.get_function_groups = wmt_pmx_get_function_groups,
- .enable = wmt_pmx_enable,
+ .set_mux = wmt_pmx_set_mux,
.gpio_disable_free = wmt_pmx_gpio_disable_free,
.gpio_set_direction = wmt_pmx_gpio_set_direction,
};
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 3bbcbf12c1fb..4dcfb7116a04 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -540,7 +540,7 @@ config ASUS_NB_WMI
This is a driver for newer Asus notebooks. It adds extra features
like wireless radio and bluetooth control, leds, hotkeys, backlight...
- For more informations, see
+ For more information, see
<file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
If you have an ACPI-WMI compatible Asus Notebook, say Y or M
@@ -553,7 +553,7 @@ config EEEPC_WMI
This is a driver for newer Eee PC laptops. It adds extra features
like wireless radio and bluetooth control, leds, hotkeys, backlight...
- For more informations, see
+ For more information, see
<file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 2dc8289e5dba..55d7b7b0f2e0 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -199,13 +199,14 @@ config REGULATOR_DA9210
interface.
config REGULATOR_DA9211
- tristate "Dialog Semiconductor DA9211/DA9212 regulator"
+ tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214 regulator"
depends on I2C
select REGMAP_I2C
help
- Say y here to support for the Dialog Semiconductor DA9211/DA9212.
- The DA9211/DA9212 is a multi-phase synchronous step down
- converter 12A DC-DC Buck controlled through an I2C
+	  Say y here to enable support for the Dialog Semiconductor
+	  DA9211/DA9212/DA9213/DA9214.
+	  The DA9211/DA9212/DA9213/DA9214 is a multi-phase synchronous
+	  step-down converter (12 A or 16 A DC-DC buck) controlled through an I2C
interface.
config REGULATOR_DBX500_PRCMU
@@ -240,6 +241,23 @@ config REGULATOR_GPIO
and the platform has to provide a mapping of GPIO-states
to target volts/amps.
+config REGULATOR_HI6421
+ tristate "HiSilicon Hi6421 PMIC voltage regulator support"
+ depends on MFD_HI6421_PMIC && OF
+ help
+ This driver provides support for the voltage regulators on the
+ HiSilicon Hi6421 PMU / Codec IC.
+	  Hi6421 is a multi-function device which, on the regulator side,
+	  provides 21 general-purpose LDOs, 3 dedicated LDOs, and 5 BUCKs.
+	  All of them support either ECO (idle) or sleep mode.
+
+config REGULATOR_ISL9305
+ tristate "Intersil ISL9305 regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  This driver supports the Intersil ISL9305 voltage regulator chip.
+
config REGULATOR_ISL6271A
tristate "Intersil ISL6271A Power regulator"
depends on I2C
@@ -387,6 +405,15 @@ config REGULATOR_MAX77693
and one current regulator 'CHARGER'. This is suitable for
Exynos-4x12 chips.
+config REGULATOR_MAX77802
+ tristate "Maxim 77802 regulator"
+ depends on MFD_MAX77686
+ help
+ This driver controls a Maxim 77802 regulator
+ via I2C bus. The provided regulator is suitable for
+ Exynos5420/Exynos5800 SoCs to control various voltages.
+ It includes support for control of voltage and ramp speed.
+
config REGULATOR_MC13XXX_CORE
tristate
@@ -449,6 +476,25 @@ config REGULATOR_PFUZE100
Say y here to support the regulators found on the Freescale
PFUZE100/PFUZE200 PMIC.
+config REGULATOR_PWM
+ tristate "PWM voltage regulator"
+ depends on PWM
+ help
+	  This driver supports PWM-controlled voltage regulators. The PWM
+	  duty cycle can increase or decrease the output voltage.
+
+config REGULATOR_QCOM_RPM
+ tristate "Qualcomm RPM regulator driver"
+ depends on MFD_QCOM_RPM
+ help
+ If you say yes to this option, support will be included for the
+ regulators exposed by the Resource Power Manager found in Qualcomm
+ 8660, 8960 and 8064 based devices.
+
+ Say M here if you want to include support for the regulators on the
+ Qualcomm RPM as a module. The module will be named
+ "qcom_rpm-regulator".
+
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
depends on MFD_RC5T583
@@ -459,6 +505,22 @@ config REGULATOR_RC5T583
through regulator interface. The device supports multiple DCDC/LDO
outputs which can be controlled by i2c communication.
+config REGULATOR_RK808
+ tristate "Rockchip RK808 Power regulators"
+ depends on MFD_RK808
+ help
+ Select this option to enable the power regulator of ROCKCHIP
+ PMIC RK808.
+	  This driver supports the control of different power rails of the
+	  device through the regulator interface. The device supports multiple
+	  DCDC/LDO outputs which can be controlled by I2C communication.
+
+config REGULATOR_RN5T618
+ tristate "Ricoh RN5T618 voltage regulators"
+ depends on MFD_RN5T618
+ help
+ Say y here to support the regulators found on Ricoh RN5T618 PMIC.
+
config REGULATOR_S2MPA01
tristate "Samsung S2MPA01 voltage regulator"
depends on MFD_SEC_CORE
@@ -483,11 +545,16 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
-config REGULATOR_ST_PWM
- tristate "STMicroelectronics PWM voltage regulator"
- depends on ARCH_STI
+config REGULATOR_SKY81452
+ tristate "Skyworks Solutions SKY81452 voltage regulator"
+ depends on SKY81452
help
- This driver supports ST's PWM controlled voltage regulators.
+	  This driver supports the Skyworks SKY81452 voltage output regulator
+	  via an I2C bus. The SKY81452 has one linear voltage regulator that
+	  can be programmed from 4.5V to 20V.
+
+ This driver can also be built as a module. If so, the module
+ will be called sky81452-regulator.
config REGULATOR_TI_ABB
tristate "TI Adaptive Body Bias on-chip LDO"
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index aa4a6aa7b558..1029ed39c512 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -32,7 +32,9 @@ obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
+obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
+obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
@@ -52,20 +54,25 @@ obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
+obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
+obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PBIAS) += pbias-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
+obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o
+obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
-obj-$(CONFIG_REGULATOR_ST_PWM) += st-pwm.o
+obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index b47283f91e2d..8459b0b648cd 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -22,12 +22,10 @@
struct as3711_regulator_info {
struct regulator_desc desc;
- unsigned int max_uV;
};
struct as3711_regulator {
struct as3711_regulator_info *reg_info;
- struct regulator_dev *rdev;
};
/*
@@ -132,39 +130,37 @@ static const struct regulator_linear_range as3711_dldo_ranges[] = {
REGULATOR_LINEAR_RANGE(1750000, 0x20, 0x3f, 50000),
};
-#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _vshift, _min_uV, _max_uV, _sfx) \
- [AS3711_REGULATOR_ ## _id] = { \
- .desc = { \
- .name = "as3711-regulator-" # _id, \
- .id = AS3711_REGULATOR_ ## _id, \
- .n_voltages = (_vmask + 1), \
- .ops = &as3711_ ## _sfx ## _ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .vsel_reg = AS3711_ ## _id ## _VOLTAGE, \
- .vsel_mask = _vmask << _vshift, \
- .enable_reg = AS3711_ ## _en_reg, \
- .enable_mask = BIT(_en_bit), \
- .min_uV = _min_uV, \
- .linear_ranges = as3711_ ## _sfx ## _ranges, \
- .n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges), \
- }, \
- .max_uV = _max_uV, \
+#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _sfx) \
+ [AS3711_REGULATOR_ ## _id] = { \
+ .desc = { \
+ .name = "as3711-regulator-" # _id, \
+ .id = AS3711_REGULATOR_ ## _id, \
+ .n_voltages = (_vmask + 1), \
+ .ops = &as3711_ ## _sfx ## _ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = AS3711_ ## _id ## _VOLTAGE, \
+ .vsel_mask = _vmask, \
+ .enable_reg = AS3711_ ## _en_reg, \
+ .enable_mask = BIT(_en_bit), \
+ .linear_ranges = as3711_ ## _sfx ## _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges), \
+ }, \
}
static struct as3711_regulator_info as3711_reg_info[] = {
- AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, 0, 612500, 3350000, sd),
- AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, 0, 612500, 3350000, sd),
- AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, 0, 612500, 3350000, sd),
- AS3711_REG(SD_4, SD_CONTROL, 3, 0x7f, 0, 612500, 3350000, sd),
- AS3711_REG(LDO_1, LDO_1_VOLTAGE, 7, 0x1f, 0, 1200000, 3300000, aldo),
- AS3711_REG(LDO_2, LDO_2_VOLTAGE, 7, 0x1f, 0, 1200000, 3300000, aldo),
- AS3711_REG(LDO_3, LDO_3_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
- AS3711_REG(LDO_4, LDO_4_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
- AS3711_REG(LDO_5, LDO_5_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
- AS3711_REG(LDO_6, LDO_6_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
- AS3711_REG(LDO_7, LDO_7_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
- AS3711_REG(LDO_8, LDO_8_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
+ AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, sd),
+ AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, sd),
+ AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, sd),
+ AS3711_REG(SD_4, SD_CONTROL, 3, 0x7f, sd),
+ AS3711_REG(LDO_1, LDO_1_VOLTAGE, 7, 0x1f, aldo),
+ AS3711_REG(LDO_2, LDO_2_VOLTAGE, 7, 0x1f, aldo),
+ AS3711_REG(LDO_3, LDO_3_VOLTAGE, 7, 0x3f, dldo),
+ AS3711_REG(LDO_4, LDO_4_VOLTAGE, 7, 0x3f, dldo),
+ AS3711_REG(LDO_5, LDO_5_VOLTAGE, 7, 0x3f, dldo),
+ AS3711_REG(LDO_6, LDO_6_VOLTAGE, 7, 0x3f, dldo),
+ AS3711_REG(LDO_7, LDO_7_VOLTAGE, 7, 0x3f, dldo),
+ AS3711_REG(LDO_8, LDO_8_VOLTAGE, 7, 0x3f, dldo),
/* StepUp output voltage depends on supplying regulator */
};
@@ -263,7 +259,6 @@ static int as3711_regulator_probe(struct platform_device *pdev)
ri->desc.name);
return PTR_ERR(rdev);
}
- reg->rdev = rdev;
}
platform_set_drvdata(pdev, regs);
return 0;
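
The simplified AS3711_REG() macro above now relies on .linear_ranges alone, since the regulator core can derive every supported voltage from the range tables. A minimal sketch of that mapping, using only the dldo range visible above (REGULATOR_LINEAR_RANGE(1750000, 0x20, 0x3f, 50000)); this is plain userspace C for illustration, not part of the driver, and it reproduces the 3.3 V ceiling the removed max_uV field used to state:

#include <stdio.h>

struct linear_range {
	unsigned int min_uV;
	unsigned int min_sel;
	unsigned int max_sel;
	unsigned int uV_step;
};

/* mirrors what regulator_list_voltage_linear_range() computes in the core */
static int range_to_uV(const struct linear_range *r, unsigned int sel)
{
	if (sel < r->min_sel || sel > r->max_sel)
		return -1;
	return r->min_uV + (sel - r->min_sel) * r->uV_step;
}

int main(void)
{
	const struct linear_range dldo_hi = { 1750000, 0x20, 0x3f, 50000 };

	/* selector 0x20 -> 1750000 uV, selector 0x3f -> 3300000 uV */
	printf("%d %d\n", range_to_uV(&dldo_hi, 0x20), range_to_uV(&dldo_hi, 0x3f));
	return 0;
}
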
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 004aadb7bcc1..2e1010a34ddc 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -245,7 +245,7 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
init_data = axp20x_matches[i].init_data;
- config.dev = &pdev->dev;
+ config.dev = pdev->dev.parent;
config.init_data = init_data;
config.regmap = axp20x->regmap;
config.of_node = axp20x_matches[i].of_node;
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 5d1fd6f3d10a..fe6ac69549a6 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -202,7 +202,6 @@ static struct bcm590xx_info bcm590xx_regs[] = {
struct bcm590xx_reg {
struct regulator_desc *desc;
struct bcm590xx *mfd;
- struct bcm590xx_info **info;
};
static int bcm590xx_get_vsel_register(int id)
@@ -389,11 +388,6 @@ static int bcm590xx_probe(struct platform_device *pdev)
if (!pmu->desc)
return -ENOMEM;
- pmu->info = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS *
- sizeof(struct bcm590xx_info *), GFP_KERNEL);
- if (!pmu->info)
- return -ENOMEM;
-
info = bcm590xx_regs;
for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
@@ -403,8 +397,6 @@ static int bcm590xx_probe(struct platform_device *pdev)
reg_data = NULL;
/* Register the regulators */
- pmu->info[i] = info;
-
pmu->desc[i].name = info->name;
pmu->desc[i].supply_name = info->vin_name;
pmu->desc[i].id = i;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a3c3785901f5..cd87c0c37034 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -102,7 +102,7 @@ static int _regulator_disable(struct regulator_dev *rdev);
static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
-static void _notifier_call_chain(struct regulator_dev *rdev,
+static int _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV);
@@ -839,7 +839,7 @@ static void print_constraints(struct regulator_dev *rdev)
static int machine_constraints_voltage(struct regulator_dev *rdev,
struct regulation_constraints *constraints)
{
- struct regulator_ops *ops = rdev->desc->ops;
+ const struct regulator_ops *ops = rdev->desc->ops;
int ret;
/* do we need to apply the constraint voltage */
@@ -938,7 +938,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
static int machine_constraints_current(struct regulator_dev *rdev,
struct regulation_constraints *constraints)
{
- struct regulator_ops *ops = rdev->desc->ops;
+ const struct regulator_ops *ops = rdev->desc->ops;
int ret;
if (!constraints->min_uA && !constraints->max_uA)
@@ -982,7 +982,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
const struct regulation_constraints *constraints)
{
int ret = 0;
- struct regulator_ops *ops = rdev->desc->ops;
+ const struct regulator_ops *ops = rdev->desc->ops;
if (constraints)
rdev->constraints = kmemdup(constraints, sizeof(*constraints),
@@ -1759,6 +1759,45 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
return 0;
}
+/**
+ * _regulator_enable_delay - a delay helper function
+ * @delay: time to delay in microseconds
+ *
+ * Delay for the requested amount of time as per the guidelines in:
+ *
+ * Documentation/timers/timers-howto.txt
+ *
+ * The assumption here is that regulators will never be enabled in
+ * atomic context and therefore sleeping functions can be used.
+ */
+static void _regulator_enable_delay(unsigned int delay)
+{
+ unsigned int ms = delay / 1000;
+ unsigned int us = delay % 1000;
+
+ if (ms > 0) {
+ /*
+ * For small enough values, handle super-millisecond
+ * delays in the usleep_range() call below.
+ */
+ if (ms < 20)
+ us += ms * 1000;
+ else
+ msleep(ms);
+ }
+
+ /*
+ * Give the scheduler some room to coalesce with any other
+ * wakeup sources. For delays shorter than 10 us, don't even
+ * bother setting up high-resolution timers and just busy-
+ * loop.
+ */
+ if (us >= 10)
+ usleep_range(us, us + 100);
+ else
+ udelay(us);
+}
+
static int _regulator_do_enable(struct regulator_dev *rdev)
{
int ret, delay;
@@ -1774,6 +1813,31 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
trace_regulator_enable(rdev_get_name(rdev));
+ if (rdev->desc->off_on_delay) {
+		/* If needed, keep a distance of off_on_delay from the last
+		 * time this regulator was disabled.
+		 */
+ unsigned long start_jiffy = jiffies;
+ unsigned long intended, max_delay, remaining;
+
+ max_delay = usecs_to_jiffies(rdev->desc->off_on_delay);
+ intended = rdev->last_off_jiffy + max_delay;
+
+ if (time_before(start_jiffy, intended)) {
+			/* calculate the remaining jiffies to handle a
+			 * one-time timer wrap.
+			 * In case of multiple wraps, either it is detected
+			 * as an out-of-range remaining value, or it goes
+			 * undetected and we pay the penalty of an extra
+			 * _regulator_enable_delay().
+			 */
+ remaining = intended - start_jiffy;
+ if (remaining <= max_delay)
+ _regulator_enable_delay(
+ jiffies_to_usecs(remaining));
+ }
+ }
+
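
The block above leans on unsigned jiffy arithmetic so the comparison stays valid across a single counter wrap. A self-contained sketch with made-up values (time_before() is open-coded here as the usual signed-difference test):

#include <stdio.h>

int main(void)
{
	unsigned long max_delay = 5;                  /* off_on_delay, in jiffies */
	unsigned long last_off  = (unsigned long)-3;  /* disabled just before jiffies wrapped */
	unsigned long now       = 1;                  /* current jiffies, after the wrap */
	unsigned long intended  = last_off + max_delay;   /* wraps around to 2 */
	unsigned long remaining = intended - now;          /* 1 jiffy still to wait */

	/* time_before(now, intended) boils down to this signed comparison */
	if ((long)(now - intended) < 0 && remaining <= max_delay)
		printf("sleep %lu more jiffies\n", remaining);
	else
		printf("off_on_delay already satisfied\n");
	return 0;
}
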
if (rdev->ena_pin) {
ret = regulator_ena_gpio_ctrl(rdev, true);
if (ret < 0)
@@ -1792,40 +1856,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* together. */
trace_regulator_enable_delay(rdev_get_name(rdev));
- /*
- * Delay for the requested amount of time as per the guidelines in:
- *
- * Documentation/timers/timers-howto.txt
- *
- * The assumption here is that regulators will never be enabled in
- * atomic context and therefore sleeping functions can be used.
- */
- if (delay) {
- unsigned int ms = delay / 1000;
- unsigned int us = delay % 1000;
-
- if (ms > 0) {
- /*
- * For small enough values, handle super-millisecond
- * delays in the usleep_range() call below.
- */
- if (ms < 20)
- us += ms * 1000;
- else
- msleep(ms);
- }
-
- /*
- * Give the scheduler some room to coalesce with any other
- * wakeup sources. For delays shorter than 10 us, don't even
- * bother setting up high-resolution timers and just busy-
- * loop.
- */
- if (us >= 10)
- usleep_range(us, us + 100);
- else
- udelay(us);
- }
+ _regulator_enable_delay(delay);
trace_regulator_enable_complete(rdev_get_name(rdev));
@@ -1919,6 +1950,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
return ret;
}
+	/* last_off_jiffy is only tracked if the device requires an
+	 * off_on_delay.
+	 */
+ if (rdev->desc->off_on_delay)
+ rdev->last_off_jiffy = jiffies;
+
trace_regulator_disable_complete(rdev_get_name(rdev));
return 0;
@@ -2208,9 +2245,9 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
- struct regulator_dev *rdev = regulator->rdev;
- struct regulator_ops *ops = rdev->desc->ops;
- int ret;
+ struct regulator_dev *rdev = regulator->rdev;
+ const struct regulator_ops *ops = rdev->desc->ops;
+ int ret;
if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
return rdev->desc->fixed_uV;
@@ -2270,8 +2307,8 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
unsigned *vsel_reg,
unsigned *vsel_mask)
{
- struct regulator_dev *rdev = regulator->rdev;
- struct regulator_ops *ops = rdev->desc->ops;
+ struct regulator_dev *rdev = regulator->rdev;
+ const struct regulator_ops *ops = rdev->desc->ops;
if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
return -EOPNOTSUPP;
@@ -2297,8 +2334,8 @@ EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register);
int regulator_list_hardware_vsel(struct regulator *regulator,
unsigned selector)
{
- struct regulator_dev *rdev = regulator->rdev;
- struct regulator_ops *ops = rdev->desc->ops;
+ struct regulator_dev *rdev = regulator->rdev;
+ const struct regulator_ops *ops = rdev->desc->ops;
if (selector >= rdev->desc->n_voltages)
return -EINVAL;
@@ -2369,6 +2406,55 @@ int regulator_is_supported_voltage(struct regulator *regulator,
}
EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
+static int _regulator_call_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct pre_voltage_change_data data;
+ int ret;
+
+ data.old_uV = _regulator_get_voltage(rdev);
+ data.min_uV = min_uV;
+ data.max_uV = max_uV;
+ ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
+ &data);
+ if (ret & NOTIFY_STOP_MASK)
+ return -EINVAL;
+
+ ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, selector);
+ if (ret >= 0)
+ return ret;
+
+ _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
+ (void *)data.old_uV);
+
+ return ret;
+}
+
+static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
+ int uV, unsigned selector)
+{
+ struct pre_voltage_change_data data;
+ int ret;
+
+ data.old_uV = _regulator_get_voltage(rdev);
+ data.min_uV = uV;
+ data.max_uV = uV;
+ ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
+ &data);
+ if (ret & NOTIFY_STOP_MASK)
+ return -EINVAL;
+
+ ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
+ if (ret >= 0)
+ return ret;
+
+ _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
+ (void *)data.old_uV);
+
+ return ret;
+}
+
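
With _notifier_call_chain() now returning the chain's verdict, a consumer can veto a pending change by returning a value with NOTIFY_STOP_MASK set, which makes the two helpers above bail out with -EINVAL. A rough consumer-side sketch (names are illustrative; it assumes the pre_voltage_change_data fields used above and registration through regulator_register_notifier()):

/* Illustration only: refuse upward voltage steps larger than 300 mV. */
static int my_vreg_notifier(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	struct pre_voltage_change_data *d = data;

	if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE &&
	    d->min_uV > d->old_uV + 300000)
		return NOTIFY_BAD;	/* NOTIFY_STOP_MASK set -> -EINVAL above */

	if (event == REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE)
		pr_info("voltage change aborted, still at %lu uV\n",
			(unsigned long)data);

	return NOTIFY_OK;
}

Such a callback would typically be attached from the consumer side with regulator_register_notifier(regulator, &my_nb).
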
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
@@ -2396,8 +2482,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
}
if (rdev->desc->ops->set_voltage) {
- ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
- &selector);
+ ret = _regulator_call_set_voltage(rdev, min_uV, max_uV,
+ &selector);
if (ret >= 0) {
if (rdev->desc->ops->list_voltage)
@@ -2432,8 +2518,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
if (old_selector == selector)
ret = 0;
else
- ret = rdev->desc->ops->set_voltage_sel(
- rdev, ret);
+ ret = _regulator_call_set_voltage_sel(
+ rdev, best_val, selector);
} else {
ret = -EINVAL;
}
@@ -2572,8 +2658,8 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage);
int regulator_set_voltage_time(struct regulator *regulator,
int old_uV, int new_uV)
{
- struct regulator_dev *rdev = regulator->rdev;
- struct regulator_ops *ops = rdev->desc->ops;
+ struct regulator_dev *rdev = regulator->rdev;
+ const struct regulator_ops *ops = rdev->desc->ops;
int old_sel = -1;
int new_sel = -1;
int voltage;
@@ -3079,11 +3165,11 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
/* notify regulator consumers and downstream regulator consumers.
* Note mutex must be held by caller.
*/
-static void _notifier_call_chain(struct regulator_dev *rdev,
+static int _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
/* call rdev chain first */
- blocking_notifier_call_chain(&rdev->notifier, event, data);
+ return blocking_notifier_call_chain(&rdev->notifier, event, data);
}
/**
@@ -3336,9 +3422,9 @@ EXPORT_SYMBOL_GPL(regulator_mode_to_status);
*/
static int add_regulator_attributes(struct regulator_dev *rdev)
{
- struct device *dev = &rdev->dev;
- struct regulator_ops *ops = rdev->desc->ops;
- int status = 0;
+ struct device *dev = &rdev->dev;
+ const struct regulator_ops *ops = rdev->desc->ops;
+ int status = 0;
/* some attributes need specific methods to be displayed */
if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
@@ -3516,12 +3602,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
return ERR_PTR(-EINVAL);
}
- init_data = config->init_data;
-
rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
if (rdev == NULL)
return ERR_PTR(-ENOMEM);
+ init_data = regulator_of_get_init_data(dev, regulator_desc,
+ &rdev->dev.of_node);
+ if (!init_data) {
+ init_data = config->init_data;
+ rdev->dev.of_node = of_node_get(config->of_node);
+ }
+
mutex_lock(&regulator_list_mutex);
mutex_init(&rdev->mutex);
@@ -3548,7 +3639,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
/* register with sysfs */
rdev->dev.class = &regulator_class;
- rdev->dev.of_node = of_node_get(config->of_node);
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%d",
atomic_inc_return(&regulator_no) - 1);
@@ -3905,7 +3995,7 @@ core_initcall(regulator_init);
static int __init regulator_init_complete(void)
{
struct regulator_dev *rdev;
- struct regulator_ops *ops;
+ const struct regulator_ops *ops;
struct regulation_constraints *c;
int enabled, ret;
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 1482adafa1ad..c78d2106d6cb 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,5 +1,5 @@
/*
- * da9211-regulator.c - Regulator device driver for DA9211
+ * da9211-regulator.c - Regulator device driver for DA9211/DA9213
* Copyright (C) 2014 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -24,9 +24,14 @@
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regulator/da9211.h>
#include "da9211-regulator.h"
+/* DEVICE IDs */
+#define DA9211_DEVICE_ID 0x22
+#define DA9213_DEVICE_ID 0x23
+
#define DA9211_BUCK_MODE_SLEEP 1
#define DA9211_BUCK_MODE_SYNC 2
#define DA9211_BUCK_MODE_AUTO 3
@@ -42,6 +47,7 @@ struct da9211 {
struct regulator_dev *rdev[DA9211_MAX_REGULATORS];
int num_regulator;
int chip_irq;
+ int chip_id;
};
static const struct regmap_range_cfg da9211_regmap_range[] = {
@@ -52,14 +58,14 @@ static const struct regmap_range_cfg da9211_regmap_range[] = {
.window_start = 0,
.window_len = 256,
.range_min = 0,
- .range_max = 2*256,
+ .range_max = 5*128,
},
};
static const struct regmap_config da9211_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 2 * 256,
+ .max_register = 5 * 128,
.ranges = da9211_regmap_range,
.num_ranges = ARRAY_SIZE(da9211_regmap_range),
};
@@ -69,11 +75,20 @@ static const struct regmap_config da9211_regmap_config = {
#define DA9211_MAX_MV 1570
#define DA9211_STEP_MV 10
-/* Current limits for buck (uA) indices corresponds with register values */
+/* Current limits for the DA9211 buck (uA); indices
+ * correspond to register values
+ */
static const int da9211_current_limits[] = {
2000000, 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000,
3600000, 3800000, 4000000, 4200000, 4400000, 4600000, 4800000, 5000000
};
+/* Current limits for the DA9213 buck (uA); indices
+ * correspond to register values
+ */
+static const int da9213_current_limits[] = {
+ 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
+ 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
+};
static unsigned int da9211_buck_get_mode(struct regulator_dev *rdev)
{
@@ -129,12 +144,26 @@ static int da9211_set_current_limit(struct regulator_dev *rdev, int min,
{
int id = rdev_get_id(rdev);
struct da9211 *chip = rdev_get_drvdata(rdev);
- int i;
+ int i, max_size;
+ const int *current_limits;
+
+ switch (chip->chip_id) {
+ case DA9211:
+ current_limits = da9211_current_limits;
+ max_size = ARRAY_SIZE(da9211_current_limits)-1;
+ break;
+ case DA9213:
+ current_limits = da9213_current_limits;
+ max_size = ARRAY_SIZE(da9213_current_limits)-1;
+ break;
+ default:
+ return -EINVAL;
+ }
/* search for closest to maximum */
- for (i = ARRAY_SIZE(da9211_current_limits)-1; i >= 0; i--) {
- if (min <= da9211_current_limits[i] &&
- max >= da9211_current_limits[i]) {
+ for (i = max_size; i >= 0; i--) {
+ if (min <= current_limits[i] &&
+ max >= current_limits[i]) {
return regmap_update_bits(chip->regmap,
DA9211_REG_BUCK_ILIM,
(0x0F << id*4), (i << id*4));
@@ -150,14 +179,28 @@ static int da9211_get_current_limit(struct regulator_dev *rdev)
struct da9211 *chip = rdev_get_drvdata(rdev);
unsigned int data;
int ret;
+ const int *current_limits;
+
+ switch (chip->chip_id) {
+ case DA9211:
+ current_limits = da9211_current_limits;
+ break;
+ case DA9213:
+ current_limits = da9213_current_limits;
+ break;
+ default:
+ return -EINVAL;
+ }
ret = regmap_read(chip->regmap, DA9211_REG_BUCK_ILIM, &data);
if (ret < 0)
return ret;
- /* select one of 16 values: 0000 (2000mA) to 1111 (5000mA) */
+ /* select one of 16 values: 0000 (2000mA or 3000mA)
+ * to 1111 (5000mA or 6000mA).
+ */
data = (data >> id*4) & 0x0F;
- return da9211_current_limits[data];
+ return current_limits[data];
}
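
The limit read back above is a 4-bit field per buck inside DA9211_REG_BUCK_ILIM, shifted by id*4. A tiny worked example with a hypothetical register value, in plain C:

#include <stdio.h>

int main(void)
{
	unsigned int ilim = 0x3a;	/* hypothetical DA9211_REG_BUCK_ILIM contents */
	int id = 1;			/* BUCKB uses bits [7:4] */
	unsigned int field = (ilim >> (id * 4)) & 0x0f;

	/* field == 0x3: 2600000 uA on a DA9211, 3600000 uA on a DA9213 */
	printf("field = %u\n", field);
	return 0;
}
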
static struct regulator_ops da9211_buck_ops = {
@@ -194,6 +237,59 @@ static struct regulator_desc da9211_regulators[] = {
DA9211_BUCK(BUCKB),
};
+#ifdef CONFIG_OF
+static struct of_regulator_match da9211_matches[] = {
+ [DA9211_ID_BUCKA] = { .name = "BUCKA" },
+ [DA9211_ID_BUCKB] = { .name = "BUCKB" },
+ };
+
+static struct da9211_pdata *da9211_parse_regulators_dt(
+ struct device *dev)
+{
+ struct da9211_pdata *pdata;
+ struct device_node *node;
+ int i, num, n;
+
+ node = of_get_child_by_name(dev->of_node, "regulators");
+ if (!node) {
+ dev_err(dev, "regulators node not found\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ num = of_regulator_match(dev, node, da9211_matches,
+ ARRAY_SIZE(da9211_matches));
+ of_node_put(node);
+ if (num < 0) {
+ dev_err(dev, "Failed to match regulators\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->num_buck = num;
+
+ n = 0;
+ for (i = 0; i < ARRAY_SIZE(da9211_matches); i++) {
+ if (!da9211_matches[i].init_data)
+ continue;
+
+ pdata->init_data[n] = da9211_matches[i].init_data;
+
+ n++;
+ }
+
+ return pdata;
+}
+#else
+static struct da9211_pdata *da9211_parse_regulators_dt(
+ struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
static irqreturn_t da9211_irq_handler(int irq, void *data)
{
struct da9211 *chip = data;
@@ -264,13 +360,11 @@ static int da9211_regulator_init(struct da9211 *chip)
}
for (i = 0; i < chip->num_regulator; i++) {
- if (chip->pdata)
- config.init_data =
- &(chip->pdata->init_data[i]);
-
+ config.init_data = chip->pdata->init_data[i];
config.dev = chip->dev;
config.driver_data = chip;
config.regmap = chip->regmap;
+ config.of_node = chip->dev->of_node;
chip->rdev[i] = devm_regulator_register(chip->dev,
&da9211_regulators[i], &config);
@@ -282,7 +376,7 @@ static int da9211_regulator_init(struct da9211 *chip)
if (chip->chip_irq != 0) {
ret = regmap_update_bits(chip->regmap,
- DA9211_REG_MASK_B, DA9211_M_OV_CURR_A << i, 1);
+ DA9211_REG_MASK_B, DA9211_M_OV_CURR_A << i, 0);
if (ret < 0) {
dev_err(chip->dev,
"Failed to update mask reg: %d\n", ret);
@@ -293,6 +387,7 @@ static int da9211_regulator_init(struct da9211 *chip)
return 0;
}
+
/*
* I2C driver interface functions
*/
@@ -301,14 +396,17 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
{
struct da9211 *chip;
int error, ret;
+ unsigned int data;
chip = devm_kzalloc(&i2c->dev, sizeof(struct da9211), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
chip->dev = &i2c->dev;
chip->regmap = devm_regmap_init_i2c(i2c, &da9211_regmap_config);
if (IS_ERR(chip->regmap)) {
error = PTR_ERR(chip->regmap);
- dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ dev_err(chip->dev, "Failed to allocate register map: %d\n",
error);
return error;
}
@@ -316,11 +414,33 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, chip);
chip->pdata = i2c->dev.platform_data;
- if (!chip->pdata) {
- dev_err(&i2c->dev, "No platform init data supplied\n");
+
+ ret = regmap_read(chip->regmap, DA9211_REG_DEVICE_ID, &data);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read DEVICE_ID reg: %d\n", ret);
+ return ret;
+ }
+
+ switch (data) {
+ case DA9211_DEVICE_ID:
+ chip->chip_id = DA9211;
+ break;
+ case DA9213_DEVICE_ID:
+ chip->chip_id = DA9213;
+ break;
+ default:
+ dev_err(chip->dev, "Unsupported device id = 0x%x.\n", data);
return -ENODEV;
}
+ if (!chip->pdata)
+ chip->pdata = da9211_parse_regulators_dt(chip->dev);
+
+ if (IS_ERR(chip->pdata)) {
+ dev_err(chip->dev, "No regulators defined for the platform\n");
+ return PTR_ERR(chip->pdata);
+ }
+
chip->chip_irq = i2c->irq;
if (chip->chip_irq != 0) {
@@ -340,22 +460,32 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
ret = da9211_regulator_init(chip);
if (ret < 0)
- dev_err(&i2c->dev, "Failed to initialize regulator: %d\n", ret);
+ dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
return ret;
}
static const struct i2c_device_id da9211_i2c_id[] = {
- {"da9211", 0},
+ {"da9211", DA9211},
+ {"da9213", DA9213},
{},
};
-
MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
+#ifdef CONFIG_OF
+static const struct of_device_id da9211_dt_ids[] = {
+ { .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
+ { .compatible = "dlg,da9213", .data = &da9211_i2c_id[1] },
+ {},
+};
+MODULE_DEVICE_TABLE(of, da9211_dt_ids);
+#endif
+
static struct i2c_driver da9211_regulator_driver = {
.driver = {
.name = "da9211",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(da9211_dt_ids),
},
.probe = da9211_i2c_probe,
.id_table = da9211_i2c_id,
@@ -364,5 +494,5 @@ static struct i2c_driver da9211_regulator_driver = {
module_i2c_driver(da9211_regulator_driver);
MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211");
+MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index 88b1769e8058..93fa9df2721c 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,5 +1,5 @@
/*
- * da9211-regulator.h - Regulator definitions for DA9211
+ * da9211-regulator.h - Regulator definitions for DA9211/DA9213
* Copyright (C) 2014 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -53,12 +53,15 @@
/* BUCK Phase Selection*/
#define DA9211_REG_CONFIG_E 0x147
+/* Device ID */
+#define DA9211_REG_DEVICE_ID 0x201
+
/*
* Registers bits
*/
/* DA9211_REG_PAGE_CON (addr=0x00) */
#define DA9211_REG_PAGE_SHIFT 1
-#define DA9211_REG_PAGE_MASK 0x02
+#define DA9211_REG_PAGE_MASK 0x06
/* On I2C registers 0x00 - 0xFF */
#define DA9211_REG_PAGE0 0
/* On I2C registers 0x100 - 0x1FF */
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 714fd9a89aa1..f8e4257aef92 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -18,6 +18,8 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/of_device.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -50,6 +52,11 @@
#define FAN53555_NVOLTAGES 64 /* Numbers of voltages */
+enum fan53555_vendor {
+ FAN53555_VENDOR_FAIRCHILD = 0,
+ FAN53555_VENDOR_SILERGY,
+};
+
/* IC Type */
enum {
FAN53555_CHIP_ID_00 = 0,
@@ -60,7 +67,12 @@ enum {
FAN53555_CHIP_ID_05,
};
+enum {
+ SILERGY_SYR82X = 8,
+};
+
struct fan53555_device_info {
+ enum fan53555_vendor vendor;
struct regmap *regmap;
struct device *dev;
struct regulator_desc desc;
@@ -135,6 +147,38 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
+static int slew_rates[] = {
+ 64000,
+ 32000,
+ 16000,
+ 8000,
+ 4000,
+ 2000,
+ 1000,
+ 500,
+};
+
+static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp)
+{
+ struct fan53555_device_info *di = rdev_get_drvdata(rdev);
+ int regval = -1, i;
+
+ for (i = 0; i < ARRAY_SIZE(slew_rates); i++) {
+ if (ramp <= slew_rates[i])
+ regval = i;
+ else
+ break;
+ }
+
+ if (regval < 0) {
+ dev_err(di->dev, "unsupported ramp value %d\n", ramp);
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(di->regmap, FAN53555_CONTROL,
+ CTL_SLEW_MASK, regval << CTL_SLEW_SHIFT);
+}
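
Because slew_rates[] above is sorted in descending order, the loop keeps the last entry that is still greater than or equal to the requested ramp, i.e. it rounds an unsupported value up to the next available rate. A quick standalone check with an illustrative request of 10000 uV/us:

#include <stdio.h>

static const int slew_rates[] = { 64000, 32000, 16000, 8000, 4000, 2000, 1000, 500 };

int main(void)
{
	int ramp = 10000, regval = -1, i;

	for (i = 0; i < 8; i++) {
		if (ramp <= slew_rates[i])
			regval = i;
		else
			break;
	}
	/* regval == 2, selecting the 16000 uV/us setting */
	printf("regval = %d\n", regval);
	return 0;
}
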
+
static struct regulator_ops fan53555_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -146,8 +190,50 @@ static struct regulator_ops fan53555_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = fan53555_set_mode,
.get_mode = fan53555_get_mode,
+ .set_ramp_delay = fan53555_set_ramp,
};
+static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
+{
+ /* Init voltage range and step */
+ switch (di->chip_id) {
+ case FAN53555_CHIP_ID_00:
+ case FAN53555_CHIP_ID_01:
+ case FAN53555_CHIP_ID_03:
+ case FAN53555_CHIP_ID_05:
+ di->vsel_min = 600000;
+ di->vsel_step = 10000;
+ break;
+ case FAN53555_CHIP_ID_04:
+ di->vsel_min = 603000;
+ di->vsel_step = 12826;
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d not supported!\n", di->chip_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
+{
+ /* Init voltage range and step */
+ switch (di->chip_id) {
+ case SILERGY_SYR82X:
+ di->vsel_min = 712500;
+ di->vsel_step = 12500;
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d not supported!\n", di->chip_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* For 00,01,03,05 options:
* VOUT = 0.60V + NSELx * 10mV, from 0.60 to 1.23V.
* For 04 option:
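
Putting numbers to the comment above: with a 10 mV step and a 6-bit selector (FAN53555_NVOLTAGES is 64), the range works out exactly to the documented 0.60 V to 1.23 V window. A one-line check in plain C:

#include <stdio.h>

int main(void)
{
	int vsel_min = 600000, vsel_step = 10000;	/* chip IDs 00/01/03/05 */

	/* NSELx = 0 -> 600000 uV, NSELx = 63 -> 1230000 uV */
	printf("%d..%d uV\n", vsel_min, vsel_min + 63 * vsel_step);
	return 0;
}
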
@@ -156,7 +242,7 @@ static struct regulator_ops fan53555_regulator_ops = {
static int fan53555_device_setup(struct fan53555_device_info *di,
struct fan53555_platform_data *pdata)
{
- unsigned int reg, data, mask;
+ int ret = 0;
/* Setup voltage control register */
switch (pdata->sleep_vsel_id) {
@@ -172,33 +258,20 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
dev_err(di->dev, "Invalid VSEL ID!\n");
return -EINVAL;
}
- /* Init voltage range and step */
- switch (di->chip_id) {
- case FAN53555_CHIP_ID_00:
- case FAN53555_CHIP_ID_01:
- case FAN53555_CHIP_ID_03:
- case FAN53555_CHIP_ID_05:
- di->vsel_min = 600000;
- di->vsel_step = 10000;
+
+ switch (di->vendor) {
+ case FAN53555_VENDOR_FAIRCHILD:
+ ret = fan53555_voltages_setup_fairchild(di);
break;
- case FAN53555_CHIP_ID_04:
- di->vsel_min = 603000;
- di->vsel_step = 12826;
+ case FAN53555_VENDOR_SILERGY:
+ ret = fan53555_voltages_setup_silergy(di);
break;
default:
- dev_err(di->dev,
- "Chip ID[%d]\n not supported!\n", di->chip_id);
+ dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
return -EINVAL;
}
- /* Init slew rate */
- if (pdata->slew_rate & 0x7)
- di->slew_rate = pdata->slew_rate;
- else
- di->slew_rate = FAN53555_SLEW_RATE_64MV;
- reg = FAN53555_CONTROL;
- data = di->slew_rate << CTL_SLEW_SHIFT;
- mask = CTL_SLEW_MASK;
- return regmap_update_bits(di->regmap, reg, mask, data);
+
+ return ret;
}
static int fan53555_regulator_register(struct fan53555_device_info *di,
@@ -207,6 +280,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
struct regulator_desc *rdesc = &di->desc;
rdesc->name = "fan53555-reg";
+ rdesc->supply_name = "vin";
rdesc->ops = &fan53555_regulator_ops;
rdesc->type = REGULATOR_VOLTAGE;
rdesc->n_voltages = FAN53555_NVOLTAGES;
@@ -227,9 +301,46 @@ static struct regmap_config fan53555_regmap_config = {
.val_bits = 8,
};
+static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev,
+ struct device_node *np)
+{
+ struct fan53555_platform_data *pdata;
+ int ret;
+ u32 tmp;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->regulator = of_get_regulator_init_data(dev, np);
+
+ ret = of_property_read_u32(np, "fcs,suspend-voltage-selector",
+ &tmp);
+ if (!ret)
+ pdata->sleep_vsel_id = tmp;
+
+ return pdata;
+}
+
+static const struct of_device_id fan53555_dt_ids[] = {
+ {
+ .compatible = "fcs,fan53555",
+ .data = (void *)FAN53555_VENDOR_FAIRCHILD
+ }, {
+ .compatible = "silergy,syr827",
+ .data = (void *)FAN53555_VENDOR_SILERGY,
+ }, {
+ .compatible = "silergy,syr828",
+ .data = (void *)FAN53555_VENDOR_SILERGY,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fan53555_dt_ids);
+
static int fan53555_regulator_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device_node *np = client->dev.of_node;
struct fan53555_device_info *di;
struct fan53555_platform_data *pdata;
struct regulator_config config = { };
@@ -237,6 +348,9 @@ static int fan53555_regulator_probe(struct i2c_client *client,
int ret;
pdata = dev_get_platdata(&client->dev);
+ if (!pdata)
+ pdata = fan53555_parse_dt(&client->dev, np);
+
if (!pdata || !pdata->regulator) {
dev_err(&client->dev, "Platform data not found!\n");
return -ENODEV;
@@ -247,13 +361,35 @@ static int fan53555_regulator_probe(struct i2c_client *client,
if (!di)
return -ENOMEM;
+ di->regulator = pdata->regulator;
+ if (client->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(fan53555_dt_ids),
+ &client->dev);
+ if (!match)
+ return -ENODEV;
+
+ di->vendor = (unsigned long) match->data;
+ } else {
+ /* if no ramp constraint set, get the pdata ramp_delay */
+ if (!di->regulator->constraints.ramp_delay) {
+ int slew_idx = (pdata->slew_rate & 0x7)
+ ? pdata->slew_rate : 0;
+
+ di->regulator->constraints.ramp_delay
+ = slew_rates[slew_idx];
+ }
+
+ di->vendor = id->driver_data;
+ }
+
di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
if (IS_ERR(di->regmap)) {
dev_err(&client->dev, "Failed to allocate regmap!\n");
return PTR_ERR(di->regmap);
}
di->dev = &client->dev;
- di->regulator = pdata->regulator;
i2c_set_clientdata(client, di);
/* Get chip ID */
ret = regmap_read(di->regmap, FAN53555_ID1, &val);
@@ -282,6 +418,8 @@ static int fan53555_regulator_probe(struct i2c_client *client,
config.init_data = di->regulator;
config.regmap = di->regmap;
config.driver_data = di;
+ config.of_node = np;
+
ret = fan53555_regulator_register(di, &config);
if (ret < 0)
dev_err(&client->dev, "Failed to register regulator!\n");
@@ -290,13 +428,20 @@ static int fan53555_regulator_probe(struct i2c_client *client,
}
static const struct i2c_device_id fan53555_id[] = {
- {"fan53555", -1},
+ {
+ .name = "fan53555",
+ .driver_data = FAN53555_VENDOR_FAIRCHILD
+ }, {
+ .name = "syr82x",
+ .driver_data = FAN53555_VENDOR_SILERGY
+ },
{ },
};
static struct i2c_driver fan53555_regulator_driver = {
.driver = {
.name = "fan53555-regulator",
+ .of_match_table = of_match_ptr(fan53555_dt_ids),
},
.probe = fan53555_regulator_probe,
.id_table = fan53555_id,
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
new file mode 100644
index 000000000000..156d0d1a55f1
--- /dev/null
+++ b/drivers/regulator/hi6421-regulator.c
@@ -0,0 +1,634 @@
+/*
+ * Device driver for regulators in Hi6421 IC
+ *
+ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
+ * http://www.hisilicon.com
+ * Copyright (c) <2013-2014> Linaro Ltd.
+ * http://www.linaro.org
+ *
+ * Author: Guodong Xu <guodong.xu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/hi6421-pmic.h>
+
+/*
+ * struct hi6421_regulator_pdata - Hi6421 regulator data of platform device
+ * @lock: mutex to serialize regulator enable
+ */
+struct hi6421_regulator_pdata {
+ struct mutex lock;
+};
+
+/*
+ * struct hi6421_regulator_info - hi6421 regulator information
+ * @desc: regulator description
+ * @mode_mask: ECO mode bitmask for LDOs; for BUCKs, the sleep mode bitmask
+ * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only
+ */
+struct hi6421_regulator_info {
+ struct regulator_desc desc;
+ u8 mode_mask;
+ u32 eco_microamp;
+};
+
+/* HI6421 regulators */
+enum hi6421_regulator_id {
+ HI6421_LDO0,
+ HI6421_LDO1,
+ HI6421_LDO2,
+ HI6421_LDO3,
+ HI6421_LDO4,
+ HI6421_LDO5,
+ HI6421_LDO6,
+ HI6421_LDO7,
+ HI6421_LDO8,
+ HI6421_LDO9,
+ HI6421_LDO10,
+ HI6421_LDO11,
+ HI6421_LDO12,
+ HI6421_LDO13,
+ HI6421_LDO14,
+ HI6421_LDO15,
+ HI6421_LDO16,
+ HI6421_LDO17,
+ HI6421_LDO18,
+ HI6421_LDO19,
+ HI6421_LDO20,
+ HI6421_LDOAUDIO,
+ HI6421_BUCK0,
+ HI6421_BUCK1,
+ HI6421_BUCK2,
+ HI6421_BUCK3,
+ HI6421_BUCK4,
+ HI6421_BUCK5,
+ HI6421_NUM_REGULATORS,
+};
+
+#define HI6421_REGULATOR_OF_MATCH(_name, id) \
+{ \
+ .name = #_name, \
+ .driver_data = (void *) HI6421_##id, \
+}
+
+static struct of_regulator_match hi6421_regulator_match[] = {
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout0, LDO0),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout1, LDO1),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout2, LDO2),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout3, LDO3),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout4, LDO4),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout5, LDO5),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout6, LDO6),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout7, LDO7),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout8, LDO8),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout9, LDO9),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout10, LDO10),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout11, LDO11),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout12, LDO12),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout13, LDO13),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout14, LDO14),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout15, LDO15),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout16, LDO16),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout17, LDO17),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout18, LDO18),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout19, LDO19),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout20, LDO20),
+ HI6421_REGULATOR_OF_MATCH(hi6421_vout_audio, LDOAUDIO),
+ HI6421_REGULATOR_OF_MATCH(hi6421_buck0, BUCK0),
+ HI6421_REGULATOR_OF_MATCH(hi6421_buck1, BUCK1),
+ HI6421_REGULATOR_OF_MATCH(hi6421_buck2, BUCK2),
+ HI6421_REGULATOR_OF_MATCH(hi6421_buck3, BUCK3),
+ HI6421_REGULATOR_OF_MATCH(hi6421_buck4, BUCK4),
+ HI6421_REGULATOR_OF_MATCH(hi6421_buck5, BUCK5),
+};
+
+/* LDO 0, 4~7, 9~14, 16~20 have the same voltage table. */
+static const unsigned int ldo_0_voltages[] = {
+ 1500000, 1800000, 2400000, 2500000,
+ 2600000, 2700000, 2850000, 3000000,
+};
+
+/* LDO 8, 15 have the same voltage table. */
+static const unsigned int ldo_8_voltages[] = {
+ 1500000, 1800000, 2400000, 2600000,
+ 2700000, 2850000, 3000000, 3300000,
+};
+
+/* Ranges are sorted in ascending order. */
+static const struct regulator_linear_range ldo_audio_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(2800000, 0, 3, 50000),
+ REGULATOR_LINEAR_RANGE(3000000, 4, 7, 100000),
+};
+
+static const unsigned int buck_3_voltages[] = {
+ 950000, 1050000, 1100000, 1117000,
+ 1134000, 1150000, 1167000, 1200000,
+};
+
+static const unsigned int buck_4_voltages[] = {
+ 1150000, 1200000, 1250000, 1350000,
+ 1700000, 1800000, 1900000, 2000000,
+};
+
+static const unsigned int buck_5_voltages[] = {
+ 1150000, 1200000, 1250000, 1350000,
+ 1600000, 1700000, 1800000, 1900000,
+};
+
+static const struct regulator_ops hi6421_ldo_ops;
+static const struct regulator_ops hi6421_ldo_linear_ops;
+static const struct regulator_ops hi6421_ldo_linear_range_ops;
+static const struct regulator_ops hi6421_buck012_ops;
+static const struct regulator_ops hi6421_buck345_ops;
+
+#define HI6421_LDO_ENABLE_TIME (350)
+/*
+ * _id - LDO id name string
+ * v_table - voltage table
+ * vreg - voltage select register
+ * vmask - voltage select mask
+ * ereg - enable register
+ * emask - enable mask
+ * odelay - off/on delay time in uS
+ * ecomask - eco mode mask
+ * ecoamp - eco mode load upper limit in uA
+ */
+#define HI6421_LDO(_id, v_table, vreg, vmask, ereg, emask, \
+ odelay, ecomask, ecoamp) \
+ [HI6421_##_id] = { \
+ .desc = { \
+ .name = #_id, \
+ .ops = &hi6421_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI6421_##_id, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(v_table), \
+ .volt_table = v_table, \
+ .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \
+ .enable_mask = emask, \
+ .enable_time = HI6421_LDO_ENABLE_TIME, \
+ .off_on_delay = odelay, \
+ }, \
+ .mode_mask = ecomask, \
+ .eco_microamp = ecoamp, \
+ }
+
+/* HI6421 LDO1~3 are linear voltage regulators at fixed uV_step
+ *
+ * _id - LDO id name string
+ * _min_uV - minimum voltage supported in uV
+ * n_volt - number of voltages available
+ * vstep - voltage increase in each linear step in uV
+ * vreg - voltage select register
+ * vmask - voltage select mask
+ * ereg - enable register
+ * emask - enable mask
+ * odelay - off/on delay time in uS
+ * ecomask - eco mode mask
+ * ecoamp - eco mode load upper limit in uA
+ */
+#define HI6421_LDO_LINEAR(_id, _min_uV, n_volt, vstep, vreg, vmask, \
+ ereg, emask, odelay, ecomask, ecoamp) \
+ [HI6421_##_id] = { \
+ .desc = { \
+ .name = #_id, \
+ .ops = &hi6421_ldo_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI6421_##_id, \
+ .owner = THIS_MODULE, \
+ .min_uV = _min_uV, \
+ .n_voltages = n_volt, \
+ .uV_step = vstep, \
+ .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \
+ .enable_mask = emask, \
+ .enable_time = HI6421_LDO_ENABLE_TIME, \
+ .off_on_delay = odelay, \
+ }, \
+ .mode_mask = ecomask, \
+ .eco_microamp = ecoamp, \
+ }
+
+/* HI6421 LDOAUDIO is a linear voltage regulator with two 4-step ranges
+ *
+ * _id - LDO id name string
+ * n_volt - number of voltages available
+ * volt_ranges - array of regulator_linear_range
+ * vreg - voltage select register
+ * vmask - voltage select mask
+ * ereg - enable register
+ * emask - enable mask
+ * odelay - off/on delay time in uS
+ * ecomask - eco mode mask
+ * ecoamp - eco mode load upper limit in uA
+ */
+#define HI6421_LDO_LINEAR_RANGE(_id, n_volt, volt_ranges, vreg, vmask, \
+ ereg, emask, odelay, ecomask, ecoamp) \
+ [HI6421_##_id] = { \
+ .desc = { \
+ .name = #_id, \
+ .ops = &hi6421_ldo_linear_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI6421_##_id, \
+ .owner = THIS_MODULE, \
+ .n_voltages = n_volt, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \
+ .enable_mask = emask, \
+ .enable_time = HI6421_LDO_ENABLE_TIME, \
+ .off_on_delay = odelay, \
+ }, \
+ .mode_mask = ecomask, \
+ .eco_microamp = ecoamp, \
+ }
+
+/* HI6421 BUCK0/1/2 are linear voltage regulators at fixed uV_step
+ *
+ * _id - BUCK0/1/2 id name string
+ * vreg - voltage select register
+ * vmask - voltage select mask
+ * ereg - enable register
+ * emask - enable mask
+ * sleepmask - mask of sleep mode
+ * etime - enable time
+ * odelay - off/on delay time in uS
+ */
+#define HI6421_BUCK012(_id, vreg, vmask, ereg, emask, sleepmask, \
+ etime, odelay) \
+ [HI6421_##_id] = { \
+ .desc = { \
+ .name = #_id, \
+ .ops = &hi6421_buck012_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI6421_##_id, \
+ .owner = THIS_MODULE, \
+ .min_uV = 700000, \
+ .n_voltages = 128, \
+ .uV_step = 7086, \
+ .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \
+ .enable_mask = emask, \
+ .enable_time = etime, \
+ .off_on_delay = odelay, \
+ }, \
+ .mode_mask = sleepmask, \
+ }
+
+/* HI6421 BUCK3/4/5 share a configuration similar to the LDOs, with the
+ * exception that they support SLEEP mode, so they use different .ops.
+ *
+ * _id - LDO id name string
+ * v_table - voltage table
+ * vreg - voltage select register
+ * vmask - voltage select mask
+ * ereg - enable register
+ * emask - enable mask
+ * odelay - off/on delay time in uS
+ * sleepmask - mask of sleep mode
+ */
+#define HI6421_BUCK345(_id, v_table, vreg, vmask, ereg, emask, \
+ odelay, sleepmask) \
+ [HI6421_##_id] = { \
+ .desc = { \
+ .name = #_id, \
+ .ops = &hi6421_buck345_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI6421_##_id, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(v_table), \
+ .volt_table = v_table, \
+ .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \
+ .enable_mask = emask, \
+ .enable_time = HI6421_LDO_ENABLE_TIME, \
+ .off_on_delay = odelay, \
+ }, \
+ .mode_mask = sleepmask, \
+ }
+
+/* HI6421 regulator information */
+static struct hi6421_regulator_info
+ hi6421_regulator_info[HI6421_NUM_REGULATORS] = {
+ HI6421_LDO(LDO0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
+ 10000, 0x20, 8000),
+ HI6421_LDO_LINEAR(LDO1, 1700000, 4, 100000, 0x21, 0x03, 0x21, 0x10,
+ 10000, 0x20, 5000),
+ HI6421_LDO_LINEAR(LDO2, 1050000, 8, 50000, 0x22, 0x07, 0x22, 0x10,
+ 20000, 0x20, 8000),
+ HI6421_LDO_LINEAR(LDO3, 1050000, 8, 50000, 0x23, 0x07, 0x23, 0x10,
+ 20000, 0x20, 8000),
+ HI6421_LDO(LDO4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10,
+ 20000, 0x20, 8000),
+ HI6421_LDO(LDO5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10,
+ 20000, 0x20, 8000),
+ HI6421_LDO(LDO6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10,
+ 20000, 0x20, 8000),
+ HI6421_LDO(LDO7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10,
+ 20000, 0x20, 5000),
+ HI6421_LDO(LDO8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10,
+ 20000, 0x20, 8000),
+ HI6421_LDO(LDO9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO(LDO20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10,
+ 40000, 0x20, 8000),
+ HI6421_LDO_LINEAR_RANGE(LDOAUDIO, 8, ldo_audio_volt_range, 0x36,
+ 0x70, 0x36, 0x01, 40000, 0x02, 5000),
+ HI6421_BUCK012(BUCK0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400, 20000),
+ HI6421_BUCK012(BUCK1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400, 20000),
+ HI6421_BUCK012(BUCK2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350, 100),
+ HI6421_BUCK345(BUCK3, buck_3_voltages, 0x13, 0x07, 0x12, 0x01,
+ 20000, 0x10),
+ HI6421_BUCK345(BUCK4, buck_4_voltages, 0x15, 0x07, 0x14, 0x01,
+ 20000, 0x10),
+ HI6421_BUCK345(BUCK5, buck_5_voltages, 0x17, 0x07, 0x16, 0x01,
+ 20000, 0x10),
+};
+
+static int hi6421_regulator_enable(struct regulator_dev *rdev)
+{
+ struct hi6421_regulator_pdata *pdata;
+ int ret;
+
+ pdata = dev_get_drvdata(rdev->dev.parent);
+ /* The hi6421 spec requires regulator enables to be serialized:
+ * when a BUCK or LDO switches from off to on it draws a large
+ * instantaneous current, so two or more LDOs or BUCKs must not
+ * be turned on simultaneously or the chip may be damaged.
+ */
+ mutex_lock(&pdata->lock);
+
+ /* the regmap helper performs the actual enable */
+ ret = regulator_enable_regmap(rdev);
+
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
+{
+ struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 reg_val;
+
+ regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
+ if (reg_val & info->mode_mask)
+ return REGULATOR_MODE_IDLE;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 reg_val;
+
+ regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
+ if (reg_val & info->mode_mask)
+ return REGULATOR_MODE_STANDBY;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 new_mode;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ new_mode = 0;
+ break;
+ case REGULATOR_MODE_IDLE:
+ new_mode = info->mode_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set mode */
+ regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ info->mode_mask, new_mode);
+
+ return 0;
+}
+
+static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 new_mode;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ new_mode = 0;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ new_mode = info->mode_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set mode */
+ regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ info->mode_mask, new_mode);
+
+ return 0;
+}
+
+unsigned int hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (load_uA > info->eco_microamp)
+ return REGULATOR_MODE_NORMAL;
+
+ return REGULATOR_MODE_IDLE;
+}
+
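The optimum-mode handler just above reduces to a simple threshold: loads above eco_microamp force NORMAL mode, loads at or below it allow the ECO (IDLE) mode. A minimal sketch of the same rule, assuming the last macro argument in the table above is the ECO limit in uA (8000 uA for LDO0), as it is in the HI6421_LDO_LINEAR_RANGE macro:

#include <stdio.h>

/* Same threshold rule as hi6421_regulator_ldo_get_optimum_mode() */
static const char *ldo_optimum_mode(int load_uA, int eco_uA)
{
    return load_uA > eco_uA ? "NORMAL" : "IDLE (ECO)";
}

int main(void)
{
    int eco_uA = 8000;  /* assumed 8 mA ECO limit, as for LDO0 above */

    printf("2 mA load  -> %s\n", ldo_optimum_mode(2000, eco_uA));
    printf("50 mA load -> %s\n", ldo_optimum_mode(50000, eco_uA));
    return 0;
}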
+static const struct regulator_ops hi6421_ldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = hi6421_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_mode = hi6421_regulator_ldo_get_mode,
+ .set_mode = hi6421_regulator_ldo_set_mode,
+ .get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
+};
+
+static const struct regulator_ops hi6421_ldo_linear_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = hi6421_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_mode = hi6421_regulator_ldo_get_mode,
+ .set_mode = hi6421_regulator_ldo_set_mode,
+ .get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
+};
+
+static const struct regulator_ops hi6421_ldo_linear_range_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = hi6421_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_mode = hi6421_regulator_ldo_get_mode,
+ .set_mode = hi6421_regulator_ldo_set_mode,
+ .get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
+};
+
+static const struct regulator_ops hi6421_buck012_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = hi6421_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_mode = hi6421_regulator_buck_get_mode,
+ .set_mode = hi6421_regulator_buck_set_mode,
+};
+
+static const struct regulator_ops hi6421_buck345_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = hi6421_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_mode = hi6421_regulator_buck_get_mode,
+ .set_mode = hi6421_regulator_buck_set_mode,
+};
+
+static int hi6421_regulator_register(struct platform_device *pdev,
+ struct regmap *rmap,
+ struct regulator_init_data *init_data,
+ int id, struct device_node *np)
+{
+ struct hi6421_regulator_info *info = NULL;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ /* assign per-regulator data */
+ info = &hi6421_regulator_info[id];
+
+ config.dev = &pdev->dev;
+ config.init_data = init_data;
+ config.driver_data = info;
+ config.regmap = rmap;
+ config.of_node = np;
+
+ /* register regulator with framework */
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static int hi6421_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct hi6421_pmic *pmic;
+ struct hi6421_regulator_pdata *pdata;
+ int i, ret = 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ mutex_init(&pdata->lock);
+ platform_set_drvdata(pdev, pdata);
+
+ np = of_get_child_by_name(dev->parent->of_node, "regulators");
+ if (!np)
+ return -ENODEV;
+
+ ret = of_regulator_match(dev, np,
+ hi6421_regulator_match,
+ ARRAY_SIZE(hi6421_regulator_match));
+ of_node_put(np);
+ if (ret < 0) {
+ dev_err(dev, "Error parsing regulator init data: %d\n", ret);
+ return ret;
+ }
+
+ pmic = dev_get_drvdata(dev->parent);
+
+ for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
+ ret = hi6421_regulator_register(pdev, pmic->regmap,
+ hi6421_regulator_match[i].init_data, i,
+ hi6421_regulator_match[i].of_node);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver hi6421_regulator_driver = {
+ .driver = {
+ .name = "hi6421-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = hi6421_regulator_probe,
+};
+module_platform_driver(hi6421_regulator_driver);
+
+MODULE_AUTHOR("Guodong Xu <guodong.xu@linaro.org>");
+MODULE_DESCRIPTION("Hi6421 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 84bbda10c396..80ba2a35a04b 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -35,4 +35,18 @@ struct regulator {
struct dentry *debugfs;
};
+#ifdef CONFIG_OF
+struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
+ const struct regulator_desc *desc,
+ struct device_node **node);
+#else
+static inline struct regulator_init_data *
+regulator_of_get_init_data(struct device *dev,
+ const struct regulator_desc *desc,
+ struct device_node **node)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
new file mode 100644
index 000000000000..92fefd98da58
--- /dev/null
+++ b/drivers/regulator/isl9305.c
@@ -0,0 +1,207 @@
+/*
+ * isl9305 - Intersil ISL9305 DCDC regulator
+ *
+ * Copyright 2014 Linaro Ltd
+ *
+ * Author: Mark Brown <broonie@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/platform_data/isl9305.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/*
+ * Registers
+ */
+#define ISL9305_DCD1OUT 0x0
+#define ISL9305_DCD2OUT 0x1
+#define ISL9305_LDO1OUT 0x2
+#define ISL9305_LDO2OUT 0x3
+#define ISL9305_DCD_PARAMETER 0x4
+#define ISL9305_SYSTEM_PARAMETER 0x5
+#define ISL9305_DCD_SRCTL 0x6
+
+#define ISL9305_MAX_REG ISL9305_DCD_SRCTL
+
+/*
+ * DCD_PARAMETER
+ */
+#define ISL9305_DCD_PHASE 0x40
+#define ISL9305_DCD2_ULTRA 0x20
+#define ISL9305_DCD1_ULTRA 0x10
+#define ISL9305_DCD2_BLD 0x08
+#define ISL9305_DCD1_BLD 0x04
+#define ISL9305_DCD2_MODE 0x02
+#define ISL9305_DCD1_MODE 0x01
+
+/*
+ * SYSTEM_PARAMETER
+ */
+#define ISL9305_I2C_EN 0x40
+#define ISL9305_DCDPOR_MASK 0x30
+#define ISL9305_LDO2_EN 0x08
+#define ISL9305_LDO1_EN 0x04
+#define ISL9305_DCD2_EN 0x02
+#define ISL9305_DCD1_EN 0x01
+
+/*
+ * DCD_SRCTL
+ */
+#define ISL9305_DCD2SR_MASK 0xc0
+#define ISL9305_DCD1SR_MASK 0x07
+
+static const struct regulator_ops isl9305_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc isl9305_regulators[] = {
+ [ISL9305_DCD1] = {
+ .name = "DCD1",
+ .of_match = of_match_ptr("dcd1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x70,
+ .min_uV = 825000,
+ .uV_step = 25000,
+ .vsel_reg = ISL9305_DCD1OUT,
+ .vsel_mask = 0x7f,
+ .enable_reg = ISL9305_SYSTEM_PARAMETER,
+ .enable_mask = ISL9305_DCD1_EN,
+ .supply_name = "VINDCD1",
+ .ops = &isl9305_ops,
+ },
+ [ISL9305_DCD2] = {
+ .name = "DCD2",
+ .of_match = of_match_ptr("dcd2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x70,
+ .min_uV = 825000,
+ .uV_step = 25000,
+ .vsel_reg = ISL9305_DCD2OUT,
+ .vsel_mask = 0x7f,
+ .enable_reg = ISL9305_SYSTEM_PARAMETER,
+ .enable_mask = ISL9305_DCD2_EN,
+ .supply_name = "VINDCD2",
+ .ops = &isl9305_ops,
+ },
+ [ISL9305_LDO1] = {
+ .name = "LDO1",
+ .of_match = of_match_ptr("ldo1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x37,
+ .min_uV = 900000,
+ .uV_step = 50000,
+ .vsel_reg = ISL9305_LDO1OUT,
+ .vsel_mask = 0x3f,
+ .enable_reg = ISL9305_SYSTEM_PARAMETER,
+ .enable_mask = ISL9305_LDO1_EN,
+ .supply_name = "VINLDO1",
+ .ops = &isl9305_ops,
+ },
+ [ISL9305_LDO2] = {
+ .name = "LDO2",
+ .of_match = of_match_ptr("ldo2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x37,
+ .min_uV = 900000,
+ .uV_step = 50000,
+ .vsel_reg = ISL9305_LDO2OUT,
+ .vsel_mask = 0x3f,
+ .enable_reg = ISL9305_SYSTEM_PARAMETER,
+ .enable_mask = ISL9305_LDO2_EN,
+ .supply_name = "VINLDO2",
+ .ops = &isl9305_ops,
+ },
+};
+
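As a cross-check of the descriptors above, all four ISL9305 outputs top out at 3.6 V: the DCDs span upwards from 825000 uV in 25000 uV steps over 0x70 selectors, the LDOs from 900000 uV in 50000 uV steps over 0x37 selectors. A small sketch of the min_uV + (n_voltages - 1) * uV_step arithmetic the regulator core performs:

#include <stdio.h>

/* Top of a linear range, as regulator_list_voltage_linear() would report
 * for the highest selector.
 */
static long max_uV(long min_uV, long step_uV, int n_voltages)
{
    return min_uV + (long)(n_voltages - 1) * step_uV;
}

int main(void)
{
    printf("DCD1/2 top: %ld uV\n", max_uV(825000, 25000, 0x70)); /* 3600000 */
    printf("LDO1/2 top: %ld uV\n", max_uV(900000, 50000, 0x37)); /* 3600000 */
    return 0;
}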
+static const struct regmap_config isl9305_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = ISL9305_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int isl9305_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regulator_config config = { };
+ struct isl9305_pdata *pdata = i2c->dev.platform_data;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i, ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &isl9305_regmap);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "Failed to create regmap: %d\n", ret);
+ return ret;
+ }
+
+ config.dev = &i2c->dev;
+
+ for (i = 0; i < ARRAY_SIZE(isl9305_regulators); i++) {
+ if (pdata)
+ config.init_data = pdata->init_data[i];
+ else
+ config.init_data = NULL;
+
+ rdev = devm_regulator_register(&i2c->dev,
+ &isl9305_regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "Failed to register %s: %d\n",
+ isl9305_regulators[i].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id isl9305_dt_ids[] = {
+ { .compatible = "isl,isl9305" },
+ { .compatible = "isl,isl9305h" },
+ {},
+};
+#endif
+
+static const struct i2c_device_id isl9305_i2c_id[] = {
+ { "isl9305", },
+ { "isl9305h", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, isl9305_i2c_id);
+
+static struct i2c_driver isl9305_regulator_driver = {
+ .driver = {
+ .name = "isl9305",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(isl9305_dt_ids),
+ },
+ .probe = isl9305_i2c_probe,
+ .id_table = isl9305_i2c_id,
+};
+
+module_i2c_driver(isl9305_regulator_driver);
+
+MODULE_AUTHOR("Mark Brown");
+MODULE_DESCRIPTION("Intersil ISL9305 DCDC regulator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index c756955bfcc5..0ce8e4e0fa73 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -372,6 +372,7 @@ static bool ltc3589_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case LTC3589_IRQSTAT:
case LTC3589_PGSTAT:
+ case LTC3589_VCCR:
return true;
}
return false;
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index d23d0577754b..86db310d5304 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -24,6 +24,8 @@
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/regulator/max1586.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
#define MAX1586_V3_MAX_VSEL 31
#define MAX1586_V6_MAX_VSEL 3
@@ -157,13 +159,87 @@ static struct regulator_desc max1586_reg[] = {
},
};
+static int of_get_max1586_platform_data(struct device *dev,
+ struct max1586_platform_data *pdata)
+{
+ struct max1586_subdev_data *sub;
+ struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)];
+ struct device_node *np = dev->of_node;
+ int i, matched;
+
+ if (of_property_read_u32(np, "v3-gain",
+ &pdata->v3_gain) < 0) {
+ dev_err(dev, "%s has no 'v3-gain' property\n", np->full_name);
+ return -EINVAL;
+ }
+
+ np = of_get_child_by_name(np, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rmatch); i++)
+ rmatch[i].name = max1586_reg[i].name;
+
+ matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch));
+ of_node_put(np);
+ /*
+ * If matched is 0, i.e. neither Output_V3 nor Output_V6 has been
+ * found, return 0, which signals the normal situation where no
+ * subregulator is available. This is normal because the max1586
+ * doesn't provide any readback support, so the subregulators can't
+ * report any status anyway. If matched < 0, return the error.
+ */
+ if (matched <= 0)
+ return matched;
+
+ pdata->subdevs = devm_kzalloc(dev, sizeof(struct max1586_subdev_data) *
+ matched, GFP_KERNEL);
+ if (!pdata->subdevs)
+ return -ENOMEM;
+
+ pdata->num_subdevs = matched;
+ sub = pdata->subdevs;
+
+ for (i = 0; i < matched; i++) {
+ sub->id = i;
+ sub->name = rmatch[i].of_node->name;
+ sub->platform_data = rmatch[i].init_data;
+ sub++;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id max1586_of_match[] = {
+ { .compatible = "maxim,max1586", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max1586_of_match);
+
static int max1586_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct max1586_platform_data *pdata, pdata_of;
struct regulator_config config = { };
struct max1586_data *max1586;
- int i, id;
+ int i, id, ret;
+ const struct of_device_id *match;
+
+ pdata = dev_get_platdata(&client->dev);
+ if (client->dev.of_node && !pdata) {
+ match = of_match_device(of_match_ptr(max1586_of_match),
+ &client->dev);
+ if (!match) {
+ dev_err(&client->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ ret = of_get_max1586_platform_data(&client->dev, &pdata_of);
+ if (ret < 0)
+ return ret;
+ pdata = &pdata_of;
+ }
max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data),
GFP_KERNEL);
@@ -229,6 +305,7 @@ static struct i2c_driver max1586_pmic_driver = {
.driver = {
.name = "max1586",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(max1586_of_match),
},
.id_table = max1586_id,
};
diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802.c
new file mode 100644
index 000000000000..d89792b084e9
--- /dev/null
+++ b/drivers/regulator/max77802.c
@@ -0,0 +1,586 @@
+/*
+ * max77802.c - Regulator driver for the Maxim 77802
+ *
+ * Copyright (C) 2013-2014 Google, Inc
+ * Simon Glass <sjg@chromium.org>
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@smasung.com>
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/max77686.h>
+#include <linux/mfd/max77686-private.h>
+
+/* Default ramp delay in case it is not manually set */
+#define MAX77802_RAMP_DELAY 100000 /* uV/us */
+
+#define MAX77802_OPMODE_SHIFT_LDO 6
+#define MAX77802_OPMODE_BUCK234_SHIFT 4
+#define MAX77802_OPMODE_MASK 0x3
+
+#define MAX77802_VSEL_MASK 0x3F
+#define MAX77802_DVS_VSEL_MASK 0xFF
+
+#define MAX77802_RAMP_RATE_MASK_2BIT 0xC0
+#define MAX77802_RAMP_RATE_SHIFT_2BIT 6
+#define MAX77802_RAMP_RATE_MASK_4BIT 0xF0
+#define MAX77802_RAMP_RATE_SHIFT_4BIT 4
+
+/* MAX77802 uses two ramp-rate register formats: 2-bit and 4-bit */
+static const unsigned int ramp_table_77802_2bit[] = {
+ 12500,
+ 25000,
+ 50000,
+ 100000,
+};
+
+static const unsigned int ramp_table_77802_4bit[] = {
+ 1000, 2000, 3030, 4000,
+ 5000, 5880, 7140, 8330,
+ 9090, 10000, 11110, 12500,
+ 16670, 25000, 50000, 100000,
+};
+
+struct max77802_regulator_prv {
+ unsigned int opmode[MAX77802_REG_MAX];
+};
+
+static int max77802_get_opmode_shift(int id)
+{
+ if (id == MAX77802_BUCK1 || (id >= MAX77802_BUCK5 &&
+ id <= MAX77802_BUCK10))
+ return 0;
+
+ if (id >= MAX77802_BUCK2 && id <= MAX77802_BUCK4)
+ return MAX77802_OPMODE_BUCK234_SHIFT;
+
+ if (id >= MAX77802_LDO1 && id <= MAX77802_LDO35)
+ return MAX77802_OPMODE_SHIFT_LDO;
+
+ return -EINVAL;
+}
+
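max77802_get_opmode_shift() above places the 2-bit opmode field at bit 0 for BUCK1 and BUCK5-10, at bit 4 for BUCK2-4 and at bit 6 for the LDOs; the enable_mask values in the descriptor macros further down are simply MAX77802_OPMODE_MASK shifted by the same amount. A quick standalone check of the resulting masks, using the same defines:

#include <stdio.h>

#define OPMODE_MASK          0x3
#define OPMODE_BUCK234_SHIFT 4
#define OPMODE_SHIFT_LDO     6

int main(void)
{
    /* BUCK1/5-10: shift 0, BUCK2-4: shift 4, LDOs: shift 6 */
    printf("BUCK1/5-10 enable_mask = 0x%02x\n", OPMODE_MASK << 0);                    /* 0x03 */
    printf("BUCK2-4 enable_mask    = 0x%02x\n", OPMODE_MASK << OPMODE_BUCK234_SHIFT); /* 0x30 */
    printf("LDO enable_mask        = 0x%02x\n", OPMODE_MASK << OPMODE_SHIFT_LDO);     /* 0xc0 */
    return 0;
}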
+/*
+ * Some BUCKs support Normal[ON/OFF] mode during suspend
+ *
+ * BUCKs 1, 6, 2-4, 5, 7-10 (i.e. all of them)
+ *
+ * The other mode (0x02) will make PWRREQ switch between normal
+ * and low power.
+ */
+static int max77802_buck_set_suspend_disable(struct regulator_dev *rdev)
+{
+ unsigned int val = MAX77802_OPMODE_STANDBY;
+ struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int shift = max77802_get_opmode_shift(id);
+
+ max77802->opmode[id] = val;
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val << shift);
+}
+
+/*
+ * Some LDOs support LPM-ON/OFF/Normal-ON modes during suspend
+ * (Enable Control Logic1 by PWRREQ)
+ *
+ * LDOs 2, 4-19, 22-35.
+ *
+ */
+static int max77802_ldo_set_suspend_mode_logic1(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ unsigned int val;
+ int shift = max77802_get_opmode_shift(id);
+
+ switch (mode) {
+ case REGULATOR_MODE_IDLE: /* ON in LP Mode */
+ val = MAX77802_OPMODE_LP;
+ break;
+ case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
+ val = MAX77802_OPMODE_NORMAL;
+ break;
+ case REGULATOR_MODE_STANDBY: /* ON/OFF by PWRREQ */
+ val = MAX77802_OPMODE_STANDBY;
+ break;
+ default:
+ dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
+ rdev->desc->name, mode);
+ return -EINVAL;
+ }
+
+ max77802->opmode[id] = val;
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val << shift);
+}
+
+/*
+ * Mode 1 (Output[ON/OFF] by PWRREQ) is not supported on some LDOs
+ * (Enable Control Logic2 by PWRREQ)
+ *
+ * LDOs 1, 3, 20 and 21.
+ *
+ */
+static int max77802_ldo_set_suspend_mode_logic2(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ unsigned int val;
+ int shift = max77802_get_opmode_shift(id);
+
+ switch (mode) {
+ case REGULATOR_MODE_IDLE: /* ON in LP Mode */
+ val = MAX77802_OPMODE_LP;
+ break;
+ case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
+ val = MAX77802_OPMODE_NORMAL;
+ break;
+ default:
+ dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
+ rdev->desc->name, mode);
+ return -EINVAL;
+ }
+
+ max77802->opmode[id] = val;
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val << shift);
+}
+
+static int max77802_enable(struct regulator_dev *rdev)
+{
+ struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int shift = max77802_get_opmode_shift(id);
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask,
+ max77802->opmode[id] << shift);
+}
+
+static int max77802_find_ramp_value(struct regulator_dev *rdev,
+ const unsigned int limits[], int size,
+ unsigned int ramp_delay)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (ramp_delay <= limits[i])
+ return i;
+ }
+
+ /* Use maximum value for no ramp control */
+ dev_warn(&rdev->dev, "%s: ramp_delay: %d not supported, setting 100000\n",
+ rdev->desc->name, ramp_delay);
+ return size - 1;
+}
+
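max77802_find_ramp_value() above picks the index of the first table entry that is greater than or equal to the requested ramp rate, capping at the largest entry. With the 2-bit table {12500, 25000, 50000, 100000}, a request of 30000 uV/us selects index 2 and a request of 200000 uV/us falls back to index 3. The same search as a standalone sketch:

#include <stdio.h>

static const unsigned int table_2bit[] = { 12500, 25000, 50000, 100000 };

/* Same search as max77802_find_ramp_value(): first entry >= request */
static int find_ramp_value(const unsigned int *limits, int size, unsigned int ramp)
{
    int i;

    for (i = 0; i < size; i++)
        if (ramp <= limits[i])
            return i;
    return size - 1;    /* cap at the largest supported rate */
}

int main(void)
{
    printf("30000 uV/us  -> index %d\n", find_ramp_value(table_2bit, 4, 30000));  /* 2 */
    printf("200000 uV/us -> index %d\n", find_ramp_value(table_2bit, 4, 200000)); /* 3 */
    return 0;
}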
+/* Used for BUCKs 2-4 */
+static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ int id = rdev_get_id(rdev);
+ unsigned int ramp_value;
+
+ if (id > MAX77802_BUCK4) {
+ dev_warn(&rdev->dev,
+ "%s: regulator: ramp delay not supported\n",
+ rdev->desc->name);
+ return -EINVAL;
+ }
+ ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit,
+ ARRAY_SIZE(ramp_table_77802_2bit), ramp_delay);
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ MAX77802_RAMP_RATE_MASK_2BIT,
+ ramp_value << MAX77802_RAMP_RATE_SHIFT_2BIT);
+}
+
+/* For BUCK1, 6 */
+static int max77802_set_ramp_delay_4bit(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ unsigned int ramp_value;
+
+ ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_4bit,
+ ARRAY_SIZE(ramp_table_77802_4bit), ramp_delay);
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ MAX77802_RAMP_RATE_MASK_4BIT,
+ ramp_value << MAX77802_RAMP_RATE_SHIFT_4BIT);
+}
+
+/*
+ * LDOs 2, 4-19, 22-35
+ */
+static struct regulator_ops max77802_ldo_ops_logic1 = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = max77802_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_mode = max77802_ldo_set_suspend_mode_logic1,
+};
+
+/*
+ * LDOs 1, 20, 21, 3
+ */
+static struct regulator_ops max77802_ldo_ops_logic2 = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = max77802_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_mode = max77802_ldo_set_suspend_mode_logic2,
+};
+
+/* BUCKS 1, 6 */
+static struct regulator_ops max77802_buck_16_dvs_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = max77802_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = max77802_set_ramp_delay_4bit,
+ .set_suspend_disable = max77802_buck_set_suspend_disable,
+};
+
+/* BUCKs 2-4, 5, 7-10 */
+static struct regulator_ops max77802_buck_dvs_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = max77802_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = max77802_set_ramp_delay_2bit,
+ .set_suspend_disable = max77802_buck_set_suspend_disable,
+};
+
+/* LDOs 3-7, 9-14, 18-26, 28, 29, 32-34 */
+#define regulator_77802_desc_p_ldo(num, supply, log) { \
+ .name = "LDO"#num, \
+ .id = MAX77802_LDO##num, \
+ .supply_name = "inl"#supply, \
+ .ops = &max77802_ldo_ops_logic##log, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 800000, \
+ .uV_step = 50000, \
+ .ramp_delay = MAX77802_RAMP_DELAY, \
+ .n_voltages = 1 << 6, \
+ .vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77802_VSEL_MASK, \
+ .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
+}
+
+/* LDOs 1, 2, 8, 15, 17, 27, 30, 35 */
+#define regulator_77802_desc_n_ldo(num, supply, log) { \
+ .name = "LDO"#num, \
+ .id = MAX77802_LDO##num, \
+ .supply_name = "inl"#supply, \
+ .ops = &max77802_ldo_ops_logic##log, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 800000, \
+ .uV_step = 25000, \
+ .ramp_delay = MAX77802_RAMP_DELAY, \
+ .n_voltages = 1 << 6, \
+ .vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77802_VSEL_MASK, \
+ .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
+}
+
+/* BUCKs 1, 6 */
+#define regulator_77802_desc_16_buck(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77802_BUCK##num, \
+ .supply_name = "inb"#num, \
+ .ops = &max77802_buck_16_dvs_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 612500, \
+ .uV_step = 6250, \
+ .ramp_delay = MAX77802_RAMP_DELAY, \
+ .n_voltages = 1 << 8, \
+ .vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \
+ .vsel_mask = MAX77802_DVS_VSEL_MASK, \
+ .enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \
+ .enable_mask = MAX77802_OPMODE_MASK, \
+}
+
+/* BUCKS 2-4 */
+#define regulator_77802_desc_234_buck(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77802_BUCK##num, \
+ .supply_name = "inb"#num, \
+ .ops = &max77802_buck_dvs_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 600000, \
+ .uV_step = 6250, \
+ .ramp_delay = MAX77802_RAMP_DELAY, \
+ .n_voltages = 0x91, \
+ .vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \
+ .vsel_mask = MAX77802_DVS_VSEL_MASK, \
+ .enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
+ .enable_mask = MAX77802_OPMODE_MASK << \
+ MAX77802_OPMODE_BUCK234_SHIFT, \
+}
+
+/* BUCK 5 */
+#define regulator_77802_desc_buck5(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77802_BUCK##num, \
+ .supply_name = "inb"#num, \
+ .ops = &max77802_buck_dvs_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 750000, \
+ .uV_step = 50000, \
+ .ramp_delay = MAX77802_RAMP_DELAY, \
+ .n_voltages = 1 << 6, \
+ .vsel_reg = MAX77802_REG_BUCK5OUT, \
+ .vsel_mask = MAX77802_VSEL_MASK, \
+ .enable_reg = MAX77802_REG_BUCK5CTRL, \
+ .enable_mask = MAX77802_OPMODE_MASK, \
+}
+
+/* BUCKs 7-10 */
+#define regulator_77802_desc_buck7_10(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77802_BUCK##num, \
+ .supply_name = "inb"#num, \
+ .ops = &max77802_buck_dvs_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 750000, \
+ .uV_step = 50000, \
+ .ramp_delay = MAX77802_RAMP_DELAY, \
+ .n_voltages = 1 << 6, \
+ .vsel_reg = MAX77802_REG_BUCK7OUT + (num - 7) * 3, \
+ .vsel_mask = MAX77802_VSEL_MASK, \
+ .enable_reg = MAX77802_REG_BUCK7CTRL + (num - 7) * 3, \
+ .enable_mask = MAX77802_OPMODE_MASK, \
+}
+
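The BUCK7-10 macro above derives both the voltage and enable registers from the BUCK7 base with a stride of 3 registers per buck. A tiny sketch of that offset arithmetic (the 0x40 base below is purely hypothetical; the real addresses come from max77686-private.h):

#include <stdio.h>

int main(void)
{
    /* Hypothetical base address, for illustration only */
    const unsigned int BUCK7OUT = 0x40;
    int num;

    /* vsel_reg = MAX77802_REG_BUCK7OUT + (num - 7) * 3, as in the macro above */
    for (num = 7; num <= 10; num++)
        printf("BUCK%d OUT register offset: base + 0x%02x\n",
               num, BUCK7OUT + (num - 7) * 3 - BUCK7OUT);
    return 0;
}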
+static struct regulator_desc regulators[] = {
+ regulator_77802_desc_16_buck(1),
+ regulator_77802_desc_234_buck(2),
+ regulator_77802_desc_234_buck(3),
+ regulator_77802_desc_234_buck(4),
+ regulator_77802_desc_buck5(5),
+ regulator_77802_desc_16_buck(6),
+ regulator_77802_desc_buck7_10(7),
+ regulator_77802_desc_buck7_10(8),
+ regulator_77802_desc_buck7_10(9),
+ regulator_77802_desc_buck7_10(10),
+ regulator_77802_desc_n_ldo(1, 10, 2),
+ regulator_77802_desc_n_ldo(2, 10, 1),
+ regulator_77802_desc_p_ldo(3, 3, 2),
+ regulator_77802_desc_p_ldo(4, 6, 1),
+ regulator_77802_desc_p_ldo(5, 3, 1),
+ regulator_77802_desc_p_ldo(6, 3, 1),
+ regulator_77802_desc_p_ldo(7, 3, 1),
+ regulator_77802_desc_n_ldo(8, 1, 1),
+ regulator_77802_desc_p_ldo(9, 5, 1),
+ regulator_77802_desc_p_ldo(10, 4, 1),
+ regulator_77802_desc_p_ldo(11, 4, 1),
+ regulator_77802_desc_p_ldo(12, 9, 1),
+ regulator_77802_desc_p_ldo(13, 4, 1),
+ regulator_77802_desc_p_ldo(14, 4, 1),
+ regulator_77802_desc_n_ldo(15, 1, 1),
+ regulator_77802_desc_n_ldo(17, 2, 1),
+ regulator_77802_desc_p_ldo(18, 7, 1),
+ regulator_77802_desc_p_ldo(19, 5, 1),
+ regulator_77802_desc_p_ldo(20, 7, 2),
+ regulator_77802_desc_p_ldo(21, 6, 2),
+ regulator_77802_desc_p_ldo(23, 9, 1),
+ regulator_77802_desc_p_ldo(24, 6, 1),
+ regulator_77802_desc_p_ldo(25, 9, 1),
+ regulator_77802_desc_p_ldo(26, 9, 1),
+ regulator_77802_desc_n_ldo(27, 2, 1),
+ regulator_77802_desc_p_ldo(28, 7, 1),
+ regulator_77802_desc_p_ldo(29, 7, 1),
+ regulator_77802_desc_n_ldo(30, 2, 1),
+ regulator_77802_desc_p_ldo(32, 9, 1),
+ regulator_77802_desc_p_ldo(33, 6, 1),
+ regulator_77802_desc_p_ldo(34, 9, 1),
+ regulator_77802_desc_n_ldo(35, 2, 1),
+};
+
+#ifdef CONFIG_OF
+static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev,
+ struct max77686_platform_data *pdata)
+{
+ struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct device_node *pmic_np, *regulators_np;
+ struct max77686_regulator_data *rdata;
+ struct of_regulator_match rmatch;
+ unsigned int i;
+
+ pmic_np = iodev->dev->of_node;
+ regulators_np = of_get_child_by_name(pmic_np, "regulators");
+ if (!regulators_np) {
+ dev_err(&pdev->dev, "could not find regulators sub-node\n");
+ return -EINVAL;
+ }
+
+ pdata->num_regulators = ARRAY_SIZE(regulators);
+ rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
+ pdata->num_regulators, GFP_KERNEL);
+ if (!rdata) {
+ of_node_put(regulators_np);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pdata->num_regulators; i++) {
+ rmatch.name = regulators[i].name;
+ rmatch.init_data = NULL;
+ rmatch.of_node = NULL;
+ if (of_regulator_match(&pdev->dev, regulators_np, &rmatch,
+ 1) != 1) {
+ dev_warn(&pdev->dev, "No matching regulator for '%s'\n",
+ rmatch.name);
+ continue;
+ }
+ rdata[i].initdata = rmatch.init_data;
+ rdata[i].of_node = rmatch.of_node;
+ rdata[i].id = regulators[i].id;
+ }
+
+ pdata->regulators = rdata;
+ of_node_put(regulators_np);
+
+ return 0;
+}
+#else
+static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev,
+ struct max77686_platform_data *pdata)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int max77802_pmic_probe(struct platform_device *pdev)
+{
+ struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max77802_regulator_prv *max77802;
+ int i, ret = 0, val;
+ struct regulator_config config = { };
+
+ /* This is allocated by the MFD driver */
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data found for regulator\n");
+ return -ENODEV;
+ }
+
+ max77802 = devm_kzalloc(&pdev->dev,
+ sizeof(struct max77802_regulator_prv),
+ GFP_KERNEL);
+ if (!max77802)
+ return -ENOMEM;
+
+ if (iodev->dev->of_node) {
+ ret = max77802_pmic_dt_parse_pdata(pdev, pdata);
+ if (ret)
+ return ret;
+ }
+
+ config.dev = iodev->dev;
+ config.regmap = iodev->regmap;
+ config.driver_data = max77802;
+ platform_set_drvdata(pdev, max77802);
+
+ for (i = 0; i < MAX77802_REG_MAX; i++) {
+ struct regulator_dev *rdev;
+ int id = pdata->regulators[i].id;
+ int shift = max77802_get_opmode_shift(id);
+
+ config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].of_node;
+
+ ret = regmap_read(iodev->regmap, regulators[i].enable_reg, &val);
+ if (ret < 0) {
+ dev_warn(&pdev->dev,
+ "cannot read current mode for %d, assuming OFF\n", i);
+ val = MAX77802_OPMODE_OFF;
+ }
+ val = val >> shift & MAX77802_OPMODE_MASK;
+
+ /*
+ * If the regulator is disabled and the system warm rebooted,
+ * the hardware reports OFF as the regulator operating mode.
+ * Default to operating mode NORMAL in that case.
+ */
+ if (val == MAX77802_OPMODE_OFF)
+ max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
+ else
+ max77802->opmode[id] = val;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "regulator init failed for %d\n", i);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id max77802_pmic_id[] = {
+ {"max77802-pmic", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, max77802_pmic_id);
+
+static struct platform_driver max77802_pmic_driver = {
+ .driver = {
+ .name = "max77802-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max77802_pmic_probe,
+ .id_table = max77802_pmic_id,
+};
+
+module_platform_driver(max77802_pmic_driver);
+
+MODULE_DESCRIPTION("MAXIM 77802 Regulator Driver");
+MODULE_AUTHOR("Simon Glass <sjg@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index f374fa57220f..793b662a1967 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -526,6 +526,7 @@ static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
+static struct regulator_ops mc13892_vcam_ops;
static int mc13892_regulator_probe(struct platform_device *pdev)
{
@@ -582,10 +583,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
}
mc13xxx_unlock(mc13892);
- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
- = mc13892_vcam_set_mode;
- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
- = mc13892_vcam_get_mode;
+ /* update mc13892_vcam ops */
+ memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
+ sizeof(struct regulator_ops));
+ mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode;
+ mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode;
+ mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
ARRAY_SIZE(mc13892_regulators));
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index ee5e67bc8d5b..7a51814abdc5 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -14,8 +14,11 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
+#include "internal.h"
+
static void of_get_regulation_constraints(struct device_node *np,
struct regulator_init_data **init_data)
{
@@ -189,3 +192,51 @@ int of_regulator_match(struct device *dev, struct device_node *node,
return count;
}
EXPORT_SYMBOL_GPL(of_regulator_match);
+
+struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
+ const struct regulator_desc *desc,
+ struct device_node **node)
+{
+ struct device_node *search, *child;
+ struct regulator_init_data *init_data = NULL;
+ const char *name;
+
+ if (!dev->of_node || !desc->of_match)
+ return NULL;
+
+ if (desc->regulators_node)
+ search = of_get_child_by_name(dev->of_node,
+ desc->regulators_node);
+ else
+ search = dev->of_node;
+
+ if (!search) {
+ dev_err(dev, "Failed to find regulator container node\n");
+ return NULL;
+ }
+
+ for_each_child_of_node(search, child) {
+ name = of_get_property(child, "regulator-compatible", NULL);
+ if (!name)
+ name = child->name;
+
+ if (strcmp(desc->of_match, name))
+ continue;
+
+ init_data = of_get_regulator_init_data(dev, child);
+ if (!init_data) {
+ dev_err(dev,
+ "failed to parse DT for regulator %s\n",
+ child->name);
+ break;
+ }
+
+ of_node_get(child);
+ *node = child;
+ break;
+ }
+
+ of_node_put(search);
+
+ return init_data;
+}
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
new file mode 100644
index 000000000000..d3f55eaea058
--- /dev/null
+++ b/drivers/regulator/pwm-regulator.c
@@ -0,0 +1,197 @@
+/*
+ * Regulator driver for PWM Regulators
+ *
+ * Copyright (C) 2014 - STMicroelectronics Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pwm.h>
+
+struct pwm_regulator_data {
+ struct regulator_desc desc;
+ struct pwm_voltages *duty_cycle_table;
+ struct pwm_device *pwm;
+ bool enabled;
+ int state;
+};
+
+struct pwm_voltages {
+ unsigned int uV;
+ unsigned int dutycycle;
+};
+
+static int pwm_regulator_get_voltage_sel(struct regulator_dev *dev)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ return drvdata->state;
+}
+
+static int pwm_regulator_set_voltage_sel(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+ unsigned int pwm_reg_period;
+ int dutycycle;
+ int ret;
+
+ pwm_reg_period = pwm_get_period(drvdata->pwm);
+
+ dutycycle = (pwm_reg_period *
+ drvdata->duty_cycle_table[selector].dutycycle) / 100;
+
+ ret = pwm_config(drvdata->pwm, dutycycle, pwm_reg_period);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to configure PWM\n");
+ return ret;
+ }
+
+ drvdata->state = selector;
+
+ if (!drvdata->enabled) {
+ ret = pwm_enable(drvdata->pwm);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to enable PWM\n");
+ return ret;
+ }
+ drvdata->enabled = true;
+ }
+
+ return 0;
+}
+
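pwm_regulator_set_voltage_sel() above converts the table's percentage duty cycle into the PWM framework's nanosecond units: duty_ns = period_ns * percent / 100. For example, a hypothetical 40000 ns PWM period with a 35% table entry asks pwm_config() for 14000 ns. A quick check of that arithmetic:

#include <stdio.h>

/* Mirrors the duty-cycle math in pwm_regulator_set_voltage_sel() */
static unsigned int percent_to_duty_ns(unsigned int period_ns, unsigned int percent)
{
    return period_ns * percent / 100;
}

int main(void)
{
    /* Hypothetical 40000 ns period with a 35% duty-cycle table entry */
    printf("duty = %u ns\n", percent_to_duty_ns(40000, 35));    /* 14000 */
    return 0;
}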
+static int pwm_regulator_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ if (selector >= drvdata->desc.n_voltages)
+ return -EINVAL;
+
+ return drvdata->duty_cycle_table[selector].uV;
+}
+
+static struct regulator_ops pwm_regulator_voltage_ops = {
+ .set_voltage_sel = pwm_regulator_set_voltage_sel,
+ .get_voltage_sel = pwm_regulator_get_voltage_sel,
+ .list_voltage = pwm_regulator_list_voltage,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+static const struct regulator_desc pwm_regulator_desc = {
+ .name = "pwm-regulator",
+ .ops = &pwm_regulator_voltage_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .supply_name = "pwm",
+};
+
+static int pwm_regulator_probe(struct platform_device *pdev)
+{
+ struct pwm_regulator_data *drvdata;
+ struct property *prop;
+ struct regulator_dev *regulator;
+ struct regulator_config config = { };
+ struct device_node *np = pdev->dev.of_node;
+ int length, ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Device Tree node missing\n");
+ return -EINVAL;
+ }
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ memcpy(&drvdata->desc, &pwm_regulator_desc, sizeof(pwm_regulator_desc));
+
+ /* determine the number of voltage-table entries */
+ prop = of_find_property(np, "voltage-table", &length);
+ if (!prop) {
+ dev_err(&pdev->dev, "No voltage-table\n");
+ return -EINVAL;
+ }
+
+ if ((length < sizeof(*drvdata->duty_cycle_table)) ||
+ (length % sizeof(*drvdata->duty_cycle_table))) {
+ dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
+ length);
+ return -EINVAL;
+ }
+
+ drvdata->desc.n_voltages = length / sizeof(*drvdata->duty_cycle_table);
+
+ drvdata->duty_cycle_table = devm_kzalloc(&pdev->dev,
+ length, GFP_KERNEL);
+ if (!drvdata->duty_cycle_table)
+ return -ENOMEM;
+
+ /* read voltage table from DT property */
+ ret = of_property_read_u32_array(np, "voltage-table",
+ (u32 *)drvdata->duty_cycle_table,
+ length / sizeof(u32));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "read voltage-table failed\n");
+ return ret;
+ }
+
+ config.init_data = of_get_regulator_init_data(&pdev->dev, np);
+ if (!config.init_data)
+ return -ENOMEM;
+
+ config.of_node = np;
+ config.dev = &pdev->dev;
+ config.driver_data = drvdata;
+
+ drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(drvdata->pwm)) {
+ dev_err(&pdev->dev, "Failed to get PWM\n");
+ return PTR_ERR(drvdata->pwm);
+ }
+
+ regulator = devm_regulator_register(&pdev->dev,
+ &drvdata->desc, &config);
+ if (IS_ERR(regulator)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ drvdata->desc.name);
+ return PTR_ERR(regulator);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id pwm_of_match[] = {
+ { .compatible = "pwm-regulator" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pwm_of_match);
+
+static struct platform_driver pwm_regulator_driver = {
+ .driver = {
+ .name = "pwm-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pwm_of_match),
+ },
+ .probe = pwm_regulator_probe,
+};
+
+module_platform_driver(pwm_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
+MODULE_DESCRIPTION("PWM Regulator Driver");
+MODULE_ALIAS("platform:pwm-regulator");
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
new file mode 100644
index 000000000000..b55cd5b50ebe
--- /dev/null
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/qcom_rpm.h>
+
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+#define MAX_REQUEST_LEN 2
+
+struct request_member {
+ int word;
+ unsigned int mask;
+ int shift;
+};
+
+struct rpm_reg_parts {
+ struct request_member mV; /* used if voltage is in mV */
+ struct request_member uV; /* used if voltage is in uV */
+ struct request_member ip; /* peak current in mA */
+ struct request_member pd; /* pull down enable */
+ struct request_member ia; /* average current in mA */
+ struct request_member fm; /* force mode */
+ struct request_member pm; /* power mode */
+ struct request_member pc; /* pin control */
+ struct request_member pf; /* pin function */
+ struct request_member enable_state; /* NCP and switch */
+ struct request_member comp_mode; /* NCP */
+ struct request_member freq; /* frequency: NCP and SMPS */
+ struct request_member freq_clk_src; /* clock source: SMPS */
+ struct request_member hpm; /* switch: control OCP and SS */
+ int request_len;
+};
+
+#define FORCE_MODE_IS_2_BITS(reg) \
+ (((reg)->parts->fm.mask >> (reg)->parts->fm.shift) == 3)
+
+struct qcom_rpm_reg {
+ struct qcom_rpm *rpm;
+
+ struct mutex lock;
+ struct device *dev;
+ struct regulator_desc desc;
+ const struct rpm_reg_parts *parts;
+
+ int resource;
+ u32 val[MAX_REQUEST_LEN];
+
+ int uV;
+ int is_enabled;
+
+ bool supports_force_mode_auto;
+ bool supports_force_mode_bypass;
+};
+
+static const struct rpm_reg_parts rpm8660_ldo_parts = {
+ .request_len = 2,
+ .mV = { 0, 0x00000FFF, 0 },
+ .ip = { 0, 0x00FFF000, 12 },
+ .fm = { 0, 0x03000000, 24 },
+ .pc = { 0, 0x3C000000, 26 },
+ .pf = { 0, 0xC0000000, 30 },
+ .pd = { 1, 0x00000001, 0 },
+ .ia = { 1, 0x00001FFE, 1 },
+};
+
+static const struct rpm_reg_parts rpm8660_smps_parts = {
+ .request_len = 2,
+ .mV = { 0, 0x00000FFF, 0 },
+ .ip = { 0, 0x00FFF000, 12 },
+ .fm = { 0, 0x03000000, 24 },
+ .pc = { 0, 0x3C000000, 26 },
+ .pf = { 0, 0xC0000000, 30 },
+ .pd = { 1, 0x00000001, 0 },
+ .ia = { 1, 0x00001FFE, 1 },
+ .freq = { 1, 0x001FE000, 13 },
+ .freq_clk_src = { 1, 0x00600000, 21 },
+};
+
+static const struct rpm_reg_parts rpm8660_switch_parts = {
+ .request_len = 1,
+ .enable_state = { 0, 0x00000001, 0 },
+ .pd = { 0, 0x00000002, 1 },
+ .pc = { 0, 0x0000003C, 2 },
+ .pf = { 0, 0x000000C0, 6 },
+ .hpm = { 0, 0x00000300, 8 },
+};
+
+static const struct rpm_reg_parts rpm8660_ncp_parts = {
+ .request_len = 1,
+ .mV = { 0, 0x00000FFF, 0 },
+ .enable_state = { 0, 0x00001000, 12 },
+ .comp_mode = { 0, 0x00002000, 13 },
+ .freq = { 0, 0x003FC000, 14 },
+};
+
+static const struct rpm_reg_parts rpm8960_ldo_parts = {
+ .request_len = 2,
+ .uV = { 0, 0x007FFFFF, 0 },
+ .pd = { 0, 0x00800000, 23 },
+ .pc = { 0, 0x0F000000, 24 },
+ .pf = { 0, 0xF0000000, 28 },
+ .ip = { 1, 0x000003FF, 0 },
+ .ia = { 1, 0x000FFC00, 10 },
+ .fm = { 1, 0x00700000, 20 },
+};
+
+static const struct rpm_reg_parts rpm8960_smps_parts = {
+ .request_len = 2,
+ .uV = { 0, 0x007FFFFF, 0 },
+ .pd = { 0, 0x00800000, 23 },
+ .pc = { 0, 0x0F000000, 24 },
+ .pf = { 0, 0xF0000000, 28 },
+ .ip = { 1, 0x000003FF, 0 },
+ .ia = { 1, 0x000FFC00, 10 },
+ .fm = { 1, 0x00700000, 20 },
+ .pm = { 1, 0x00800000, 23 },
+ .freq = { 1, 0x1F000000, 24 },
+ .freq_clk_src = { 1, 0x60000000, 29 },
+};
+
+static const struct rpm_reg_parts rpm8960_switch_parts = {
+ .request_len = 1,
+ .enable_state = { 0, 0x00000001, 0 },
+ .pd = { 0, 0x00000002, 1 },
+ .pc = { 0, 0x0000003C, 2 },
+ .pf = { 0, 0x000003C0, 6 },
+ .hpm = { 0, 0x00000C00, 10 },
+};
+
+static const struct rpm_reg_parts rpm8960_ncp_parts = {
+ .request_len = 1,
+ .uV = { 0, 0x007FFFFF, 0 },
+ .enable_state = { 0, 0x00800000, 23 },
+ .comp_mode = { 0, 0x01000000, 24 },
+ .freq = { 0, 0x3E000000, 25 },
+};
+
+/*
+ * Physically available PMIC regulator voltage ranges
+ */
+static const struct regulator_linear_range pldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE( 750000, 0, 59, 12500),
+ REGULATOR_LINEAR_RANGE(1500000, 60, 123, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 124, 160, 50000),
+};
+
+static const struct regulator_linear_range nldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
+};
+
+static const struct regulator_linear_range nldo1200_ranges[] = {
+ REGULATOR_LINEAR_RANGE( 375000, 0, 59, 6250),
+ REGULATOR_LINEAR_RANGE( 750000, 60, 123, 12500),
+};
+
+static const struct regulator_linear_range smps_ranges[] = {
+ REGULATOR_LINEAR_RANGE( 375000, 0, 29, 12500),
+ REGULATOR_LINEAR_RANGE( 750000, 30, 89, 12500),
+ REGULATOR_LINEAR_RANGE(1500000, 90, 153, 25000),
+};
+
+static const struct regulator_linear_range ftsmps_ranges[] = {
+ REGULATOR_LINEAR_RANGE( 350000, 0, 6, 50000),
+ REGULATOR_LINEAR_RANGE( 700000, 7, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1500000, 64, 100, 50000),
+};
+
+static const struct regulator_linear_range ncp_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1500000, 0, 31, 50000),
+};
+
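The pldo ranges above stitch three linear segments together, and regulator_list_voltage_linear_range() walks them in order: selector 59 is the top of the first segment (750000 + 59 * 12500 = 1487500 uV), selector 60 starts the second at 1500000 uV, and selector 160 is the top of the third (4900000 uV), which is why the pldo descriptors below use n_voltages = 161. A standalone sketch of the same walk:

#include <stdio.h>

struct lin_range { long min_uV; unsigned int min_sel, max_sel; long step_uV; };

/* Same numbers as pldo_ranges above */
static const struct lin_range pldo[] = {
    {  750000,   0,  59, 12500 },
    { 1500000,  60, 123, 25000 },
    { 3100000, 124, 160, 50000 },
};

static long sel_to_uV(const struct lin_range *r, int n, unsigned int sel)
{
    int i;

    for (i = 0; i < n; i++)
        if (sel >= r[i].min_sel && sel <= r[i].max_sel)
            return r[i].min_uV + (sel - r[i].min_sel) * r[i].step_uV;
    return -1;  /* selector out of range */
}

int main(void)
{
    printf("sel 59  -> %ld uV\n", sel_to_uV(pldo, 3, 59));  /* 1487500 */
    printf("sel 60  -> %ld uV\n", sel_to_uV(pldo, 3, 60));  /* 1500000 */
    printf("sel 160 -> %ld uV\n", sel_to_uV(pldo, 3, 160)); /* 4900000 */
    return 0;
}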
+static int rpm_reg_write(struct qcom_rpm_reg *vreg,
+ const struct request_member *req,
+ const int value)
+{
+ if (WARN_ON((value << req->shift) & ~req->mask))
+ return -EINVAL;
+
+ vreg->val[req->word] &= ~req->mask;
+ vreg->val[req->word] |= value << req->shift;
+
+ return qcom_rpm_write(vreg->rpm,
+ vreg->resource,
+ vreg->val,
+ vreg->parts->request_len);
+}
+
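rpm_reg_write() above packs each value into the request words using the mask/shift pairs from the *_parts tables before handing the whole request to qcom_rpm_write(). With the rpm8660 LDO layout, for example, a 2100 mV voltage and a 150 mA peak-current request both land in word 0 as (2100 << 0) | (150 << 12) = 0x00096834. A userspace sketch of just the packing step (masking is applied here in place of the driver's WARN_ON check):

#include <stdio.h>
#include <stdint.h>

struct member { int word; uint32_t mask; int shift; };

/* Same field layout as rpm8660_ldo_parts above (word 0 only) */
static const struct member mV = { 0, 0x00000FFF,  0 };
static const struct member ip = { 0, 0x00FFF000, 12 };

static void pack(uint32_t *req, const struct member *m, uint32_t value)
{
    req[m->word] &= ~m->mask;
    req[m->word] |= (value << m->shift) & m->mask;
}

int main(void)
{
    uint32_t req[2] = { 0, 0 };

    pack(req, &mV, 2100);   /* 2.1 V, expressed in mV */
    pack(req, &ip, 150);    /* 150 mA peak current */
    printf("word0 = 0x%08x\n", req[0]);     /* 0x00096834 */
    return 0;
}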
+static int rpm_reg_set_mV_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->mV;
+ int ret = 0;
+ int uV;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ uV = regulator_list_voltage_linear_range(rdev, selector);
+ if (uV < 0)
+ return uV;
+
+ mutex_lock(&vreg->lock);
+ vreg->uV = uV;
+ if (vreg->is_enabled)
+ ret = rpm_reg_write(vreg, req, vreg->uV / 1000);
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_set_uV_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->uV;
+ int ret = 0;
+ int uV;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ uV = regulator_list_voltage_linear_range(rdev, selector);
+ if (uV < 0)
+ return uV;
+
+ mutex_lock(&vreg->lock);
+ vreg->uV = uV;
+ if (vreg->is_enabled)
+ ret = rpm_reg_write(vreg, req, vreg->uV);
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_get_voltage(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->uV;
+}
+
+static int rpm_reg_mV_enable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->mV;
+ int ret;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ mutex_lock(&vreg->lock);
+ ret = rpm_reg_write(vreg, req, vreg->uV / 1000);
+ if (!ret)
+ vreg->is_enabled = 1;
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_uV_enable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->uV;
+ int ret;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ mutex_lock(&vreg->lock);
+ ret = rpm_reg_write(vreg, req, vreg->uV);
+ if (!ret)
+ vreg->is_enabled = 1;
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_switch_enable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->enable_state;
+ int ret;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ mutex_lock(&vreg->lock);
+ ret = rpm_reg_write(vreg, req, 1);
+ if (!ret)
+ vreg->is_enabled = 1;
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_mV_disable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->mV;
+ int ret;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ mutex_lock(&vreg->lock);
+ ret = rpm_reg_write(vreg, req, 0);
+ if (!ret)
+ vreg->is_enabled = 0;
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_uV_disable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->uV;
+ int ret;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ mutex_lock(&vreg->lock);
+ ret = rpm_reg_write(vreg, req, 0);
+ if (!ret)
+ vreg->is_enabled = 0;
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_switch_disable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ const struct rpm_reg_parts *parts = vreg->parts;
+ const struct request_member *req = &parts->enable_state;
+ int ret;
+
+ if (req->mask == 0)
+ return -EINVAL;
+
+ mutex_lock(&vreg->lock);
+ ret = rpm_reg_write(vreg, req, 0);
+ if (!ret)
+ vreg->is_enabled = 0;
+ mutex_unlock(&vreg->lock);
+
+ return ret;
+}
+
+static int rpm_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->is_enabled;
+}
+
+static struct regulator_ops uV_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+
+ .set_voltage_sel = rpm_reg_set_uV_sel,
+ .get_voltage = rpm_reg_get_voltage,
+
+ .enable = rpm_reg_uV_enable,
+ .disable = rpm_reg_uV_disable,
+ .is_enabled = rpm_reg_is_enabled,
+};
+
+static struct regulator_ops mV_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+
+ .set_voltage_sel = rpm_reg_set_mV_sel,
+ .get_voltage = rpm_reg_get_voltage,
+
+ .enable = rpm_reg_mV_enable,
+ .disable = rpm_reg_mV_disable,
+ .is_enabled = rpm_reg_is_enabled,
+};
+
+static struct regulator_ops switch_ops = {
+ .enable = rpm_reg_switch_enable,
+ .disable = rpm_reg_switch_disable,
+ .is_enabled = rpm_reg_is_enabled,
+};
+
+/*
+ * PM8058 regulators
+ */
+static const struct qcom_rpm_reg pm8058_pldo = {
+ .desc.linear_ranges = pldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
+ .desc.n_voltages = 161,
+ .desc.ops = &mV_ops,
+ .parts = &rpm8660_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8058_nldo = {
+ .desc.linear_ranges = nldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
+ .desc.n_voltages = 64,
+ .desc.ops = &mV_ops,
+ .parts = &rpm8660_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8058_smps = {
+ .desc.linear_ranges = smps_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges),
+ .desc.n_voltages = 154,
+ .desc.ops = &mV_ops,
+ .parts = &rpm8660_smps_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8058_ncp = {
+ .desc.linear_ranges = ncp_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges),
+ .desc.n_voltages = 32,
+ .desc.ops = &mV_ops,
+ .parts = &rpm8660_ncp_parts,
+};
+
+static const struct qcom_rpm_reg pm8058_switch = {
+ .desc.ops = &switch_ops,
+ .parts = &rpm8660_switch_parts,
+};
+
+/*
+ * PM8901 regulators
+ */
+static const struct qcom_rpm_reg pm8901_pldo = {
+ .desc.linear_ranges = pldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
+ .desc.n_voltages = 161,
+ .desc.ops = &mV_ops,
+ .parts = &rpm8660_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = true,
+};
+
+static const struct qcom_rpm_reg pm8901_nldo = {
+ .desc.linear_ranges = nldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
+ .desc.n_voltages = 64,
+ .desc.ops = &mV_ops,
+ .parts = &rpm8660_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = true,
+};
+
+static const struct qcom_rpm_reg pm8901_ftsmps = {
+ .desc.linear_ranges = ftsmps_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges),
+ .desc.n_voltages = 101,
+ .desc.ops = &mV_ops,
+ .parts = &rpm8660_smps_parts,
+ .supports_force_mode_auto = true,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8901_switch = {
+ .desc.ops = &switch_ops,
+ .parts = &rpm8660_switch_parts,
+};
+
+/*
+ * PM8921 regulators
+ */
+static const struct qcom_rpm_reg pm8921_pldo = {
+ .desc.linear_ranges = pldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
+ .desc.n_voltages = 161,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = true,
+};
+
+static const struct qcom_rpm_reg pm8921_nldo = {
+ .desc.linear_ranges = nldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
+ .desc.n_voltages = 64,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = true,
+};
+
+static const struct qcom_rpm_reg pm8921_nldo1200 = {
+ .desc.linear_ranges = nldo1200_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(nldo1200_ranges),
+ .desc.n_voltages = 124,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = true,
+};
+
+static const struct qcom_rpm_reg pm8921_smps = {
+ .desc.linear_ranges = smps_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges),
+ .desc.n_voltages = 154,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_smps_parts,
+ .supports_force_mode_auto = true,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8921_ftsmps = {
+ .desc.linear_ranges = ftsmps_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges),
+ .desc.n_voltages = 101,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_smps_parts,
+ .supports_force_mode_auto = true,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8921_ncp = {
+ .desc.linear_ranges = ncp_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges),
+ .desc.n_voltages = 32,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_ncp_parts,
+};
+
+static const struct qcom_rpm_reg pm8921_switch = {
+ .desc.ops = &switch_ops,
+ .parts = &rpm8960_switch_parts,
+};
+
+static const struct of_device_id rpm_of_match[] = {
+ { .compatible = "qcom,rpm-pm8058-pldo", .data = &pm8058_pldo },
+ { .compatible = "qcom,rpm-pm8058-nldo", .data = &pm8058_nldo },
+ { .compatible = "qcom,rpm-pm8058-smps", .data = &pm8058_smps },
+ { .compatible = "qcom,rpm-pm8058-ncp", .data = &pm8058_ncp },
+ { .compatible = "qcom,rpm-pm8058-switch", .data = &pm8058_switch },
+
+ { .compatible = "qcom,rpm-pm8901-pldo", .data = &pm8901_pldo },
+ { .compatible = "qcom,rpm-pm8901-nldo", .data = &pm8901_nldo },
+ { .compatible = "qcom,rpm-pm8901-ftsmps", .data = &pm8901_ftsmps },
+ { .compatible = "qcom,rpm-pm8901-switch", .data = &pm8901_switch },
+
+ { .compatible = "qcom,rpm-pm8921-pldo", .data = &pm8921_pldo },
+ { .compatible = "qcom,rpm-pm8921-nldo", .data = &pm8921_nldo },
+ { .compatible = "qcom,rpm-pm8921-nldo1200", .data = &pm8921_nldo1200 },
+ { .compatible = "qcom,rpm-pm8921-smps", .data = &pm8921_smps },
+ { .compatible = "qcom,rpm-pm8921-ftsmps", .data = &pm8921_ftsmps },
+ { .compatible = "qcom,rpm-pm8921-ncp", .data = &pm8921_ncp },
+ { .compatible = "qcom,rpm-pm8921-switch", .data = &pm8921_switch },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_of_match);
+
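+/*
+ * Stage a field value in the cached request words; the updated request is
+ * only sent to the RPM by a subsequent rpm_reg_write() call.
+ */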
+static int rpm_reg_set(struct qcom_rpm_reg *vreg,
+ const struct request_member *req,
+ const int value)
+{
+ if (req->mask == 0 || (value << req->shift) & ~req->mask)
+ return -EINVAL;
+
+ vreg->val[req->word] &= ~req->mask;
+ vreg->val[req->word] |= value << req->shift;
+
+ return 0;
+}
+
+static int rpm_reg_of_parse_freq(struct device *dev, struct qcom_rpm_reg *vreg)
+{
+ static const int freq_table[] = {
+ 19200000, 9600000, 6400000, 4800000, 3840000, 3200000, 2740000,
+ 2400000, 2130000, 1920000, 1750000, 1600000, 1480000, 1370000,
+ 1280000, 1200000,
+ };
+ const char *key;
+ u32 freq;
+ int ret;
+ int i;
+
+ key = "qcom,switch-mode-frequency";
+ ret = of_property_read_u32(dev->of_node, key, &freq);
+ if (ret) {
+ dev_err(dev, "regulator requires %s property\n", key);
+ return -EINVAL;
+ }
+
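+ /* The RPM encodes the switch-mode frequency as a 1-based index into freq_table */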
+ for (i = 0; i < ARRAY_SIZE(freq_table); i++) {
+ if (freq == freq_table[i]) {
+ rpm_reg_set(vreg, &vreg->parts->freq, i + 1);
+ return 0;
+ }
+ }
+
+ dev_err(dev, "invalid frequency %d\n", freq);
+ return -EINVAL;
+}
+
+static int rpm_reg_probe(struct platform_device *pdev)
+{
+ struct regulator_init_data *initdata;
+ const struct qcom_rpm_reg *template;
+ const struct of_device_id *match;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct qcom_rpm_reg *vreg;
+ const char *key;
+ int force_mode;
+ bool pwm;
+ u32 val;
+ int ret;
+
+ match = of_match_device(rpm_of_match, &pdev->dev);
+ template = match->data;
+
+ initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+ if (!initdata)
+ return -EINVAL;
+
+ vreg = devm_kmalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(&pdev->dev, "failed to allocate vreg\n");
+ return -ENOMEM;
+ }
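+ /* Start from the chip-specific template, then fill in the per-instance fields */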
+ memcpy(vreg, template, sizeof(*vreg));
+ mutex_init(&vreg->lock);
+ vreg->dev = &pdev->dev;
+ vreg->desc.id = -1;
+ vreg->desc.owner = THIS_MODULE;
+ vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.name = pdev->dev.of_node->name;
+
+ vreg->rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!vreg->rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+ return -ENODEV;
+ }
+
+ key = "reg";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return ret;
+ }
+ vreg->resource = val;
+
+ if ((vreg->parts->uV.mask || vreg->parts->mV.mask) &&
+ (!initdata->constraints.min_uV || !initdata->constraints.max_uV)) {
+ dev_err(&pdev->dev, "no voltage specified for regulator\n");
+ return -EINVAL;
+ }
+
+ key = "bias-pull-down";
+ if (of_property_read_bool(pdev->dev.of_node, key)) {
+ ret = rpm_reg_set(vreg, &vreg->parts->pd, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "%s is invalid", key);
+ return ret;
+ }
+ }
+
+ if (vreg->parts->freq.mask) {
+ ret = rpm_reg_of_parse_freq(&pdev->dev, vreg);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vreg->parts->pm.mask) {
+ key = "qcom,power-mode-hysteretic";
+ pwm = !of_property_read_bool(pdev->dev.of_node, key);
+
+ ret = rpm_reg_set(vreg, &vreg->parts->pm, pwm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set power mode\n");
+ return ret;
+ }
+ }
+
+ if (vreg->parts->fm.mask) {
+ force_mode = -1;
+
+ key = "qcom,force-mode";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &val);
+ if (ret == -EINVAL) {
+ val = QCOM_RPM_FORCE_MODE_NONE;
+ } else if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return ret;
+ }
+
+ /*
+ * If force-mode is encoded as 2 bits then the
+ * possible register values are:
+ * NONE, LPM, HPM
+ * otherwise:
+ * NONE, LPM, AUTO, HPM, BYPASS
+ */
+ switch (val) {
+ case QCOM_RPM_FORCE_MODE_NONE:
+ force_mode = 0;
+ break;
+ case QCOM_RPM_FORCE_MODE_LPM:
+ force_mode = 1;
+ break;
+ case QCOM_RPM_FORCE_MODE_HPM:
+ if (FORCE_MODE_IS_2_BITS(vreg))
+ force_mode = 2;
+ else
+ force_mode = 3;
+ break;
+ case QCOM_RPM_FORCE_MODE_AUTO:
+ if (vreg->supports_force_mode_auto)
+ force_mode = 2;
+ break;
+ case QCOM_RPM_FORCE_MODE_BYPASS:
+ if (vreg->supports_force_mode_bypass)
+ force_mode = 4;
+ break;
+ }
+
+ if (force_mode < 0) {
+ dev_err(&pdev->dev, "invalid force mode\n");
+ return -EINVAL;
+ }
+
+ ret = rpm_reg_set(vreg, &vreg->parts->fm, force_mode);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set force mode\n");
+ return ret;
+ }
+ }
+
+ config.dev = &pdev->dev;
+ config.init_data = initdata;
+ config.driver_data = vreg;
+ config.of_node = pdev->dev.of_node;
+ rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "can't register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver rpm_reg_driver = {
+ .probe = rpm_reg_probe,
+ .driver = {
+ .name = "qcom_rpm_reg",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rpm_of_match),
+ },
+};
+
+static int __init rpm_reg_init(void)
+{
+ return platform_driver_register(&rpm_reg_driver);
+}
+subsys_initcall(rpm_reg_init);
+
+static void __exit rpm_reg_exit(void)
+{
+ platform_driver_unregister(&rpm_reg_driver);
+}
+module_exit(rpm_reg_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
new file mode 100644
index 000000000000..e305416d7697
--- /dev/null
+++ b/drivers/regulator/rk808-regulator.c
@@ -0,0 +1,381 @@
+/*
+ * Regulator driver for Rockchip RK808
+ *
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ * Author: Zhang Qing <zhangqing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mfd/rk808.h>
+#include <linux/of_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/* Field Definitions */
+#define RK808_BUCK_VSEL_MASK 0x3f
+#define RK808_BUCK4_VSEL_MASK 0xf
+#define RK808_LDO_VSEL_MASK 0x1f
+
+/* Ramp rate definitions for buck1 / buck2 only */
+#define RK808_RAMP_RATE_OFFSET 3
+#define RK808_RAMP_RATE_MASK (3 << RK808_RAMP_RATE_OFFSET)
+#define RK808_RAMP_RATE_2MV_PER_US (0 << RK808_RAMP_RATE_OFFSET)
+#define RK808_RAMP_RATE_4MV_PER_US (1 << RK808_RAMP_RATE_OFFSET)
+#define RK808_RAMP_RATE_6MV_PER_US (2 << RK808_RAMP_RATE_OFFSET)
+#define RK808_RAMP_RATE_10MV_PER_US (3 << RK808_RAMP_RATE_OFFSET)
+
+static const int rk808_buck_config_regs[] = {
+ RK808_BUCK1_CONFIG_REG,
+ RK808_BUCK2_CONFIG_REG,
+ RK808_BUCK3_CONFIG_REG,
+ RK808_BUCK4_CONFIG_REG,
+};
+
+static const struct regulator_linear_range rk808_buck_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 63, 12500),
+};
+
+static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 15, 100000),
+};
+
+static const struct regulator_linear_range rk808_ldo_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 16, 100000),
+};
+
+static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000),
+ REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0),
+};
+
+static const struct regulator_linear_range rk808_ldo6_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000),
+};
+
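+/*
+ * Round the requested ramp rate (in uV/us) up to the closest supported
+ * setting; out-of-range requests fall back to 10mV/us with a warning.
+ */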
+static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
+ unsigned int reg = rk808_buck_config_regs[rdev->desc->id -
+ RK808_ID_DCDC1];
+
+ switch (ramp_delay) {
+ case 1 ... 2000:
+ ramp_value = RK808_RAMP_RATE_2MV_PER_US;
+ break;
+ case 2001 ... 4000:
+ ramp_value = RK808_RAMP_RATE_4MV_PER_US;
+ break;
+ case 4001 ... 6000:
+ ramp_value = RK808_RAMP_RATE_6MV_PER_US;
+ break;
+ case 6001 ... 10000:
+ break;
+ default:
+ pr_warn("%s ramp_delay: %d not supported, setting 10000\n",
+ rdev->desc->name, ramp_delay);
+ }
+
+ return regmap_update_bits(rdev->regmap, reg,
+ RK808_RAMP_RATE_MASK, ramp_value);
+}
+
+static struct regulator_ops rk808_buck1_2_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_ramp_delay = rk808_set_ramp_delay,
+};
+
+static struct regulator_ops rk808_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static struct regulator_ops rk808_switch_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_desc rk808_reg[] = {
+ {
+ .name = "DCDC_REG1",
+ .supply_name = "vcc1",
+ .id = RK808_ID_DCDC1,
+ .ops = &rk808_buck1_2_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 64,
+ .linear_ranges = rk808_buck_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
+ .vsel_reg = RK808_BUCK1_ON_VSEL_REG,
+ .vsel_mask = RK808_BUCK_VSEL_MASK,
+ .enable_reg = RK808_DCDC_EN_REG,
+ .enable_mask = BIT(0),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "DCDC_REG2",
+ .supply_name = "vcc2",
+ .id = RK808_ID_DCDC2,
+ .ops = &rk808_buck1_2_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 64,
+ .linear_ranges = rk808_buck_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
+ .vsel_reg = RK808_BUCK2_ON_VSEL_REG,
+ .vsel_mask = RK808_BUCK_VSEL_MASK,
+ .enable_reg = RK808_DCDC_EN_REG,
+ .enable_mask = BIT(1),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "DCDC_REG3",
+ .supply_name = "vcc3",
+ .id = RK808_ID_DCDC3,
+ .ops = &rk808_switch_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 1,
+ .enable_reg = RK808_DCDC_EN_REG,
+ .enable_mask = BIT(2),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "DCDC_REG4",
+ .supply_name = "vcc4",
+ .id = RK808_ID_DCDC4,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 16,
+ .linear_ranges = rk808_buck4_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_buck4_voltage_ranges),
+ .vsel_reg = RK808_BUCK4_ON_VSEL_REG,
+ .vsel_mask = RK808_BUCK4_VSEL_MASK,
+ .enable_reg = RK808_DCDC_EN_REG,
+ .enable_mask = BIT(3),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG1",
+ .supply_name = "vcc6",
+ .id = RK808_ID_LDO1,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 17,
+ .linear_ranges = rk808_ldo_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
+ .vsel_reg = RK808_LDO1_ON_VSEL_REG,
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(0),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG2",
+ .supply_name = "vcc6",
+ .id = RK808_ID_LDO2,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 17,
+ .linear_ranges = rk808_ldo_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
+ .vsel_reg = RK808_LDO2_ON_VSEL_REG,
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(1),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG3",
+ .supply_name = "vcc7",
+ .id = RK808_ID_LDO3,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 16,
+ .linear_ranges = rk808_ldo3_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo3_voltage_ranges),
+ .vsel_reg = RK808_LDO3_ON_VSEL_REG,
+ .vsel_mask = RK808_BUCK4_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(2),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG4",
+ .supply_name = "vcc9",
+ .id = RK808_ID_LDO4,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 17,
+ .linear_ranges = rk808_ldo_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
+ .vsel_reg = RK808_LDO4_ON_VSEL_REG,
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(3),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG5",
+ .supply_name = "vcc9",
+ .id = RK808_ID_LDO5,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 17,
+ .linear_ranges = rk808_ldo_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
+ .vsel_reg = RK808_LDO5_ON_VSEL_REG,
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(4),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG6",
+ .supply_name = "vcc10",
+ .id = RK808_ID_LDO6,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 18,
+ .linear_ranges = rk808_ldo6_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
+ .vsel_reg = RK808_LDO6_ON_VSEL_REG,
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(5),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG7",
+ .supply_name = "vcc7",
+ .id = RK808_ID_LDO7,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 18,
+ .linear_ranges = rk808_ldo6_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
+ .vsel_reg = RK808_LDO7_ON_VSEL_REG,
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(6),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG8",
+ .supply_name = "vcc11",
+ .id = RK808_ID_LDO8,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 17,
+ .linear_ranges = rk808_ldo_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
+ .vsel_reg = RK808_LDO8_ON_VSEL_REG,
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(7),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "SWITCH_REG1",
+ .supply_name = "vcc8",
+ .id = RK808_ID_SWITCH1,
+ .ops = &rk808_switch_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = RK808_DCDC_EN_REG,
+ .enable_mask = BIT(5),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "SWITCH_REG2",
+ .supply_name = "vcc12",
+ .id = RK808_ID_SWITCH2,
+ .ops = &rk808_switch_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = RK808_DCDC_EN_REG,
+ .enable_mask = BIT(6),
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct of_regulator_match rk808_reg_matches[] = {
+ [RK808_ID_DCDC1] = { .name = "DCDC_REG1" },
+ [RK808_ID_DCDC2] = { .name = "DCDC_REG2" },
+ [RK808_ID_DCDC3] = { .name = "DCDC_REG3" },
+ [RK808_ID_DCDC4] = { .name = "DCDC_REG4" },
+ [RK808_ID_LDO1] = { .name = "LDO_REG1" },
+ [RK808_ID_LDO2] = { .name = "LDO_REG2" },
+ [RK808_ID_LDO3] = { .name = "LDO_REG3" },
+ [RK808_ID_LDO4] = { .name = "LDO_REG4" },
+ [RK808_ID_LDO5] = { .name = "LDO_REG5" },
+ [RK808_ID_LDO6] = { .name = "LDO_REG6" },
+ [RK808_ID_LDO7] = { .name = "LDO_REG7" },
+ [RK808_ID_LDO8] = { .name = "LDO_REG8" },
+ [RK808_ID_SWITCH1] = { .name = "SWITCH_REG1" },
+ [RK808_ID_SWITCH2] = { .name = "SWITCH_REG2" },
+};
+
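+/*
+ * Match the "regulators" child nodes of the parent MFD against
+ * rk808_reg_matches and register a regulator for each node found.
+ */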
+static int rk808_regulator_probe(struct platform_device *pdev)
+{
+ struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
+ struct i2c_client *client = rk808->i2c;
+ struct device_node *reg_np;
+ struct regulator_config config = {};
+ struct regulator_dev *rk808_rdev;
+ int ret, i;
+
+ reg_np = of_get_child_by_name(client->dev.of_node, "regulators");
+ if (!reg_np)
+ return -ENXIO;
+
+ ret = of_regulator_match(&pdev->dev, reg_np, rk808_reg_matches,
+ RK808_NUM_REGULATORS);
+ of_node_put(reg_np);
+ if (ret < 0)
+ return ret;
+
+ /* Instantiate the regulators */
+ for (i = 0; i < RK808_NUM_REGULATORS; i++) {
+ if (!rk808_reg_matches[i].init_data ||
+ !rk808_reg_matches[i].of_node)
+ continue;
+
+ config.dev = &client->dev;
+ config.driver_data = rk808;
+ config.regmap = rk808->regmap;
+ config.of_node = rk808_reg_matches[i].of_node;
+ config.init_data = rk808_reg_matches[i].init_data;
+
+ rk808_rdev = devm_regulator_register(&pdev->dev,
+ &rk808_reg[i], &config);
+ if (IS_ERR(rk808_rdev)) {
+ dev_err(&client->dev,
+ "failed to register regulator %d\n", i);
+ return PTR_ERR(rk808_rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver rk808_regulator_driver = {
+ .probe = rk808_regulator_probe,
+ .driver = {
+ .name = "rk808-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(rk808_regulator_driver);
+
+MODULE_DESCRIPTION("regulator driver for the rk808 series PMICs");
+MODULE_AUTHOR("Chris Zhong<zyw@rock-chips.com>");
+MODULE_AUTHOR("Zhang Qing<zhangqing@rock-chips.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rk808-regulator");
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
new file mode 100644
index 000000000000..e58d79aeb393
--- /dev/null
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -0,0 +1,143 @@
+/*
+ * Regulator driver for Ricoh RN5T618 PMIC
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mfd/rn5t618.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+static struct regulator_ops rn5t618_reg_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+};
+
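+/*
+ * Generate a regulator_desc with a simple linear voltage map:
+ * selectors 0..n_voltages-1 cover min to max uV in steps of step uV.
+ */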
+#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \
+ [RN5T618_##rid] = { \
+ .name = #rid, \
+ .id = RN5T618_##rid, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .ops = &rn5t618_reg_ops, \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .min_uV = (min), \
+ .uV_step = (step), \
+ .enable_reg = RN5T618_##ereg, \
+ .enable_mask = (emask), \
+ .vsel_reg = RN5T618_##vreg, \
+ .vsel_mask = (vmask), \
+ }
+
+static struct regulator_desc rn5t618_regulators[] = {
+ /* DCDC */
+ REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
+ REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
+ REG(DCDC3, DC3CTL, BIT(0), DC3DAC, 0xff, 600000, 3500000, 12500),
+ /* LDO */
+ REG(LDO1, LDOEN1, BIT(0), LDO1DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO2, LDOEN1, BIT(1), LDO2DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO3, LDOEN1, BIT(2), LDO3DAC, 0x7f, 600000, 3500000, 25000),
+ REG(LDO4, LDOEN1, BIT(3), LDO4DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO5, LDOEN1, BIT(4), LDO5DAC, 0x7f, 900000, 3500000, 25000),
+ /* LDO RTC */
+ REG(LDORTC1, LDOEN2, BIT(4), LDORTCDAC, 0x7f, 1700000, 3500000, 25000),
+ REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
+};
+
+static struct of_regulator_match rn5t618_matches[] = {
+ [RN5T618_DCDC1] = { .name = "DCDC1" },
+ [RN5T618_DCDC2] = { .name = "DCDC2" },
+ [RN5T618_DCDC3] = { .name = "DCDC3" },
+ [RN5T618_LDO1] = { .name = "LDO1" },
+ [RN5T618_LDO2] = { .name = "LDO2" },
+ [RN5T618_LDO3] = { .name = "LDO3" },
+ [RN5T618_LDO4] = { .name = "LDO4" },
+ [RN5T618_LDO5] = { .name = "LDO5" },
+ [RN5T618_LDORTC1] = { .name = "LDORTC1" },
+ [RN5T618_LDORTC2] = { .name = "LDORTC2" },
+};
+
+static int rn5t618_regulator_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *np, *regulators;
+ int ret;
+
+ np = of_node_get(pdev->dev.parent->of_node);
+ if (!np)
+ return 0;
+
+ regulators = of_get_child_by_name(np, "regulators");
+ if (!regulators) {
+ dev_err(&pdev->dev, "regulators node not found\n");
+ return -EINVAL;
+ }
+
+ ret = of_regulator_match(&pdev->dev, regulators, rn5t618_matches,
+ ARRAY_SIZE(rn5t618_matches));
+ of_node_put(regulators);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error parsing regulator init data: %d\n",
+ ret);
+ }
+
+ return 0;
+}
+
+static int rn5t618_regulator_probe(struct platform_device *pdev)
+{
+ struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ int ret, i;
+
+ ret = rn5t618_regulator_parse_dt(pdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < RN5T618_REG_NUM; i++) {
+ config.dev = &pdev->dev;
+ config.init_data = rn5t618_matches[i].init_data;
+ config.of_node = rn5t618_matches[i].of_node;
+ config.regmap = rn5t618->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &rn5t618_regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s regulator\n",
+ rn5t618_regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver rn5t618_regulator_driver = {
+ .probe = rn5t618_regulator_probe,
+ .driver = {
+ .name = "rn5t618-regulator",
+ },
+};
+
+module_platform_driver(rn5t618_regulator_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("RN5T618 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index ee83b4876420..4acefa6b462e 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -235,28 +235,14 @@ static struct regulator_ops s2mpa01_buck_ops = {
.set_ramp_delay = s2mpa01_set_ramp_delay,
};
-#define regulator_desc_ldo1(num) { \
+#define regulator_desc_ldo(num, step) { \
.name = "LDO"#num, \
.id = S2MPA01_LDO##num, \
.ops = &s2mpa01_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPA01_LDO_MIN, \
- .uV_step = S2MPA01_LDO_STEP1, \
- .n_voltages = S2MPA01_LDO_N_VOLTAGES, \
- .vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \
- .vsel_mask = S2MPA01_LDO_VSEL_MASK, \
- .enable_reg = S2MPA01_REG_L1CTRL + num - 1, \
- .enable_mask = S2MPA01_ENABLE_MASK \
-}
-#define regulator_desc_ldo2(num) { \
- .name = "LDO"#num, \
- .id = S2MPA01_LDO##num, \
- .ops = &s2mpa01_ldo_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPA01_LDO_MIN, \
- .uV_step = S2MPA01_LDO_STEP2, \
+ .min_uV = MIN_800_MV, \
+ .uV_step = step, \
.n_voltages = S2MPA01_LDO_N_VOLTAGES, \
.vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \
.vsel_mask = S2MPA01_LDO_VSEL_MASK, \
@@ -270,8 +256,8 @@ static struct regulator_ops s2mpa01_buck_ops = {
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPA01_BUCK_MIN1, \
- .uV_step = S2MPA01_BUCK_STEP1, \
+ .min_uV = MIN_600_MV, \
+ .uV_step = STEP_6_25_MV, \
.n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPA01_RAMP_DELAY, \
.vsel_reg = S2MPA01_REG_B1CTRL2 + (num - 1) * 2, \
@@ -286,8 +272,8 @@ static struct regulator_ops s2mpa01_buck_ops = {
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPA01_BUCK_MIN2, \
- .uV_step = S2MPA01_BUCK_STEP1, \
+ .min_uV = MIN_800_MV, \
+ .uV_step = STEP_6_25_MV, \
.n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPA01_RAMP_DELAY, \
.vsel_reg = S2MPA01_REG_B5CTRL2, \
@@ -296,14 +282,14 @@ static struct regulator_ops s2mpa01_buck_ops = {
.enable_mask = S2MPA01_ENABLE_MASK \
}
-#define regulator_desc_buck6_7(num) { \
+#define regulator_desc_buck6_10(num, min, step) { \
.name = "BUCK"#num, \
.id = S2MPA01_BUCK##num, \
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPA01_BUCK_MIN1, \
- .uV_step = S2MPA01_BUCK_STEP1, \
+ .min_uV = min, \
+ .uV_step = step, \
.n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPA01_RAMP_DELAY, \
.vsel_reg = S2MPA01_REG_B6CTRL2 + (num - 6) * 2, \
@@ -312,91 +298,43 @@ static struct regulator_ops s2mpa01_buck_ops = {
.enable_mask = S2MPA01_ENABLE_MASK \
}
-#define regulator_desc_buck8 { \
- .name = "BUCK8", \
- .id = S2MPA01_BUCK8, \
- .ops = &s2mpa01_buck_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPA01_BUCK_MIN2, \
- .uV_step = S2MPA01_BUCK_STEP2, \
- .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
- .ramp_delay = S2MPA01_RAMP_DELAY, \
- .vsel_reg = S2MPA01_REG_B8CTRL2, \
- .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
- .enable_reg = S2MPA01_REG_B8CTRL1, \
- .enable_mask = S2MPA01_ENABLE_MASK \
-}
-
-#define regulator_desc_buck9 { \
- .name = "BUCK9", \
- .id = S2MPA01_BUCK9, \
- .ops = &s2mpa01_buck_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPA01_BUCK_MIN4, \
- .uV_step = S2MPA01_BUCK_STEP2, \
- .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
- .ramp_delay = S2MPA01_RAMP_DELAY, \
- .vsel_reg = S2MPA01_REG_B9CTRL2, \
- .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
- .enable_reg = S2MPA01_REG_B9CTRL1, \
- .enable_mask = S2MPA01_ENABLE_MASK \
-}
-
-#define regulator_desc_buck10 { \
- .name = "BUCK10", \
- .id = S2MPA01_BUCK10, \
- .ops = &s2mpa01_buck_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPA01_BUCK_MIN3, \
- .uV_step = S2MPA01_BUCK_STEP2, \
- .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
- .ramp_delay = S2MPA01_RAMP_DELAY, \
- .vsel_reg = S2MPA01_REG_B10CTRL2, \
- .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
- .enable_reg = S2MPA01_REG_B10CTRL1, \
- .enable_mask = S2MPA01_ENABLE_MASK \
-}
-
static struct regulator_desc regulators[] = {
- regulator_desc_ldo2(1),
- regulator_desc_ldo1(2),
- regulator_desc_ldo1(3),
- regulator_desc_ldo1(4),
- regulator_desc_ldo1(5),
- regulator_desc_ldo2(6),
- regulator_desc_ldo1(7),
- regulator_desc_ldo1(8),
- regulator_desc_ldo1(9),
- regulator_desc_ldo1(10),
- regulator_desc_ldo2(11),
- regulator_desc_ldo1(12),
- regulator_desc_ldo1(13),
- regulator_desc_ldo1(14),
- regulator_desc_ldo1(15),
- regulator_desc_ldo1(16),
- regulator_desc_ldo1(17),
- regulator_desc_ldo1(18),
- regulator_desc_ldo1(19),
- regulator_desc_ldo1(20),
- regulator_desc_ldo1(21),
- regulator_desc_ldo2(22),
- regulator_desc_ldo2(23),
- regulator_desc_ldo1(24),
- regulator_desc_ldo1(25),
- regulator_desc_ldo1(26),
+ regulator_desc_ldo(1, STEP_25_MV),
+ regulator_desc_ldo(2, STEP_50_MV),
+ regulator_desc_ldo(3, STEP_50_MV),
+ regulator_desc_ldo(4, STEP_50_MV),
+ regulator_desc_ldo(5, STEP_50_MV),
+ regulator_desc_ldo(6, STEP_25_MV),
+ regulator_desc_ldo(7, STEP_50_MV),
+ regulator_desc_ldo(8, STEP_50_MV),
+ regulator_desc_ldo(9, STEP_50_MV),
+ regulator_desc_ldo(10, STEP_50_MV),
+ regulator_desc_ldo(11, STEP_25_MV),
+ regulator_desc_ldo(12, STEP_50_MV),
+ regulator_desc_ldo(13, STEP_50_MV),
+ regulator_desc_ldo(14, STEP_50_MV),
+ regulator_desc_ldo(15, STEP_50_MV),
+ regulator_desc_ldo(16, STEP_50_MV),
+ regulator_desc_ldo(17, STEP_50_MV),
+ regulator_desc_ldo(18, STEP_50_MV),
+ regulator_desc_ldo(19, STEP_50_MV),
+ regulator_desc_ldo(20, STEP_50_MV),
+ regulator_desc_ldo(21, STEP_50_MV),
+ regulator_desc_ldo(22, STEP_25_MV),
+ regulator_desc_ldo(23, STEP_25_MV),
+ regulator_desc_ldo(24, STEP_50_MV),
+ regulator_desc_ldo(25, STEP_50_MV),
+ regulator_desc_ldo(26, STEP_50_MV),
regulator_desc_buck1_4(1),
regulator_desc_buck1_4(2),
regulator_desc_buck1_4(3),
regulator_desc_buck1_4(4),
regulator_desc_buck5,
- regulator_desc_buck6_7(6),
- regulator_desc_buck6_7(7),
- regulator_desc_buck8,
- regulator_desc_buck9,
- regulator_desc_buck10,
+ regulator_desc_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_buck6_10(8, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_buck6_10(9, MIN_1500_MV, STEP_12_5_MV),
+ regulator_desc_buck6_10(10, MIN_1000_MV, STEP_12_5_MV),
};
static int s2mpa01_pmic_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index b16c53a8272f..adab82d5279f 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -255,28 +255,14 @@ static struct regulator_ops s2mps11_buck_ops = {
.set_ramp_delay = s2mps11_set_ramp_delay,
};
-#define regulator_desc_s2mps11_ldo1(num) { \
+#define regulator_desc_s2mps11_ldo(num, step) { \
.name = "LDO"#num, \
.id = S2MPS11_LDO##num, \
.ops = &s2mps11_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPS11_LDO_MIN, \
- .uV_step = S2MPS11_LDO_STEP1, \
- .n_voltages = S2MPS11_LDO_N_VOLTAGES, \
- .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \
- .vsel_mask = S2MPS11_LDO_VSEL_MASK, \
- .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \
- .enable_mask = S2MPS11_ENABLE_MASK \
-}
-#define regulator_desc_s2mps11_ldo2(num) { \
- .name = "LDO"#num, \
- .id = S2MPS11_LDO##num, \
- .ops = &s2mps11_ldo_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPS11_LDO_MIN, \
- .uV_step = S2MPS11_LDO_STEP2, \
+ .min_uV = MIN_800_MV, \
+ .uV_step = step, \
.n_voltages = S2MPS11_LDO_N_VOLTAGES, \
.vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \
.vsel_mask = S2MPS11_LDO_VSEL_MASK, \
@@ -290,8 +276,8 @@ static struct regulator_ops s2mps11_buck_ops = {
.ops = &s2mps11_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPS11_BUCK_MIN1, \
- .uV_step = S2MPS11_BUCK_STEP1, \
+ .min_uV = MIN_600_MV, \
+ .uV_step = STEP_6_25_MV, \
.n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPS11_RAMP_DELAY, \
.vsel_reg = S2MPS11_REG_B1CTRL2 + (num - 1) * 2, \
@@ -306,8 +292,8 @@ static struct regulator_ops s2mps11_buck_ops = {
.ops = &s2mps11_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPS11_BUCK_MIN1, \
- .uV_step = S2MPS11_BUCK_STEP1, \
+ .min_uV = MIN_600_MV, \
+ .uV_step = STEP_6_25_MV, \
.n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPS11_RAMP_DELAY, \
.vsel_reg = S2MPS11_REG_B5CTRL2, \
@@ -316,14 +302,14 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_s2mps11_buck6_8(num) { \
+#define regulator_desc_s2mps11_buck6_10(num, min, step) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPS11_BUCK_MIN1, \
- .uV_step = S2MPS11_BUCK_STEP1, \
+ .min_uV = min, \
+ .uV_step = step, \
.n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPS11_RAMP_DELAY, \
.vsel_reg = S2MPS11_REG_B6CTRL2 + (num - 6) * 2, \
@@ -332,87 +318,55 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_s2mps11_buck9 { \
- .name = "BUCK9", \
- .id = S2MPS11_BUCK9, \
- .ops = &s2mps11_buck_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPS11_BUCK_MIN3, \
- .uV_step = S2MPS11_BUCK_STEP3, \
- .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
- .ramp_delay = S2MPS11_RAMP_DELAY, \
- .vsel_reg = S2MPS11_REG_B9CTRL2, \
- .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
- .enable_reg = S2MPS11_REG_B9CTRL1, \
- .enable_mask = S2MPS11_ENABLE_MASK \
-}
-
-#define regulator_desc_s2mps11_buck10 { \
- .name = "BUCK10", \
- .id = S2MPS11_BUCK10, \
- .ops = &s2mps11_buck_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPS11_BUCK_MIN2, \
- .uV_step = S2MPS11_BUCK_STEP2, \
- .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
- .ramp_delay = S2MPS11_RAMP_DELAY, \
- .vsel_reg = S2MPS11_REG_B10CTRL2, \
- .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
- .enable_reg = S2MPS11_REG_B10CTRL1, \
- .enable_mask = S2MPS11_ENABLE_MASK \
-}
-
static const struct regulator_desc s2mps11_regulators[] = {
- regulator_desc_s2mps11_ldo2(1),
- regulator_desc_s2mps11_ldo1(2),
- regulator_desc_s2mps11_ldo1(3),
- regulator_desc_s2mps11_ldo1(4),
- regulator_desc_s2mps11_ldo1(5),
- regulator_desc_s2mps11_ldo2(6),
- regulator_desc_s2mps11_ldo1(7),
- regulator_desc_s2mps11_ldo1(8),
- regulator_desc_s2mps11_ldo1(9),
- regulator_desc_s2mps11_ldo1(10),
- regulator_desc_s2mps11_ldo2(11),
- regulator_desc_s2mps11_ldo1(12),
- regulator_desc_s2mps11_ldo1(13),
- regulator_desc_s2mps11_ldo1(14),
- regulator_desc_s2mps11_ldo1(15),
- regulator_desc_s2mps11_ldo1(16),
- regulator_desc_s2mps11_ldo1(17),
- regulator_desc_s2mps11_ldo1(18),
- regulator_desc_s2mps11_ldo1(19),
- regulator_desc_s2mps11_ldo1(20),
- regulator_desc_s2mps11_ldo1(21),
- regulator_desc_s2mps11_ldo2(22),
- regulator_desc_s2mps11_ldo2(23),
- regulator_desc_s2mps11_ldo1(24),
- regulator_desc_s2mps11_ldo1(25),
- regulator_desc_s2mps11_ldo1(26),
- regulator_desc_s2mps11_ldo2(27),
- regulator_desc_s2mps11_ldo1(28),
- regulator_desc_s2mps11_ldo1(29),
- regulator_desc_s2mps11_ldo1(30),
- regulator_desc_s2mps11_ldo1(31),
- regulator_desc_s2mps11_ldo1(32),
- regulator_desc_s2mps11_ldo1(33),
- regulator_desc_s2mps11_ldo1(34),
- regulator_desc_s2mps11_ldo1(35),
- regulator_desc_s2mps11_ldo1(36),
- regulator_desc_s2mps11_ldo1(37),
- regulator_desc_s2mps11_ldo1(38),
+ regulator_desc_s2mps11_ldo(1, STEP_25_MV),
+ regulator_desc_s2mps11_ldo(2, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(3, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(4, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(5, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(6, STEP_25_MV),
+ regulator_desc_s2mps11_ldo(7, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(8, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(9, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(10, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(11, STEP_25_MV),
+ regulator_desc_s2mps11_ldo(12, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(13, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(14, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(15, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(16, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(17, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(18, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(19, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(20, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(21, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(22, STEP_25_MV),
+ regulator_desc_s2mps11_ldo(23, STEP_25_MV),
+ regulator_desc_s2mps11_ldo(24, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(25, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(26, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(27, STEP_25_MV),
+ regulator_desc_s2mps11_ldo(28, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(29, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(30, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(31, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(32, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(33, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(34, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(35, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(36, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(37, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(38, STEP_50_MV),
regulator_desc_s2mps11_buck1_4(1),
regulator_desc_s2mps11_buck1_4(2),
regulator_desc_s2mps11_buck1_4(3),
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
- regulator_desc_s2mps11_buck6_8(6),
- regulator_desc_s2mps11_buck6_8(7),
- regulator_desc_s2mps11_buck6_8(8),
- regulator_desc_s2mps11_buck9,
- regulator_desc_s2mps11_buck10,
+ regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
+ regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
};
static int s2mps14_regulator_enable(struct regulator_dev *rdev)
@@ -510,56 +464,29 @@ static struct regulator_ops s2mps14_reg_ops = {
.set_suspend_disable = s2mps14_regulator_set_suspend_disable,
};
-#define regulator_desc_s2mps14_ldo1(num) { \
- .name = "LDO"#num, \
- .id = S2MPS14_LDO##num, \
- .ops = &s2mps14_reg_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPS14_LDO_MIN_800MV, \
- .uV_step = S2MPS14_LDO_STEP_25MV, \
- .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
- .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
- .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
- .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
- .enable_mask = S2MPS14_ENABLE_MASK \
-}
-#define regulator_desc_s2mps14_ldo2(num) { \
- .name = "LDO"#num, \
- .id = S2MPS14_LDO##num, \
- .ops = &s2mps14_reg_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPS14_LDO_MIN_1800MV, \
- .uV_step = S2MPS14_LDO_STEP_25MV, \
- .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
- .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
- .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
- .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
- .enable_mask = S2MPS14_ENABLE_MASK \
-}
-#define regulator_desc_s2mps14_ldo3(num) { \
+#define regulator_desc_s2mps14_ldo(num, min, step) { \
.name = "LDO"#num, \
.id = S2MPS14_LDO##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPS14_LDO_MIN_800MV, \
- .uV_step = S2MPS14_LDO_STEP_12_5MV, \
+ .min_uV = min, \
+ .uV_step = step, \
.n_voltages = S2MPS14_LDO_N_VOLTAGES, \
.vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
.vsel_mask = S2MPS14_LDO_VSEL_MASK, \
.enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
-#define regulator_desc_s2mps14_buck1235(num) { \
+
+#define regulator_desc_s2mps14_buck(num, min, step) { \
.name = "BUCK"#num, \
.id = S2MPS14_BUCK##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = S2MPS14_BUCK1235_MIN_600MV, \
- .uV_step = S2MPS14_BUCK1235_STEP_6_25MV, \
+ .min_uV = min, \
+ .uV_step = step, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
.linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
.ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
@@ -568,54 +495,38 @@ static struct regulator_ops s2mps14_reg_ops = {
.enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
-#define regulator_desc_s2mps14_buck4(num) { \
- .name = "BUCK"#num, \
- .id = S2MPS14_BUCK##num, \
- .ops = &s2mps14_reg_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .min_uV = S2MPS14_BUCK4_MIN_1400MV, \
- .uV_step = S2MPS14_BUCK4_STEP_12_5MV, \
- .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
- .linear_min_sel = S2MPS14_BUCK4_START_SEL, \
- .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
- .vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
- .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
- .enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \
- .enable_mask = S2MPS14_ENABLE_MASK \
-}
static const struct regulator_desc s2mps14_regulators[] = {
- regulator_desc_s2mps14_ldo3(1),
- regulator_desc_s2mps14_ldo3(2),
- regulator_desc_s2mps14_ldo1(3),
- regulator_desc_s2mps14_ldo1(4),
- regulator_desc_s2mps14_ldo3(5),
- regulator_desc_s2mps14_ldo3(6),
- regulator_desc_s2mps14_ldo1(7),
- regulator_desc_s2mps14_ldo2(8),
- regulator_desc_s2mps14_ldo3(9),
- regulator_desc_s2mps14_ldo3(10),
- regulator_desc_s2mps14_ldo1(11),
- regulator_desc_s2mps14_ldo2(12),
- regulator_desc_s2mps14_ldo2(13),
- regulator_desc_s2mps14_ldo2(14),
- regulator_desc_s2mps14_ldo2(15),
- regulator_desc_s2mps14_ldo2(16),
- regulator_desc_s2mps14_ldo2(17),
- regulator_desc_s2mps14_ldo2(18),
- regulator_desc_s2mps14_ldo1(19),
- regulator_desc_s2mps14_ldo1(20),
- regulator_desc_s2mps14_ldo1(21),
- regulator_desc_s2mps14_ldo3(22),
- regulator_desc_s2mps14_ldo1(23),
- regulator_desc_s2mps14_ldo2(24),
- regulator_desc_s2mps14_ldo2(25),
- regulator_desc_s2mps14_buck1235(1),
- regulator_desc_s2mps14_buck1235(2),
- regulator_desc_s2mps14_buck1235(3),
- regulator_desc_s2mps14_buck4(4),
- regulator_desc_s2mps14_buck1235(5),
+ regulator_desc_s2mps14_ldo(1, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_ldo(2, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_ldo(3, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(4, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(5, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_ldo(6, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_ldo(7, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(8, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(9, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_ldo(10, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_ldo(11, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(12, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(13, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(14, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(15, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(16, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(17, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(18, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(19, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(20, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(21, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(22, MIN_800_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
+ regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
+ regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
};
static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
diff --git a/drivers/regulator/sky81452-regulator.c b/drivers/regulator/sky81452-regulator.c
new file mode 100644
index 000000000000..97aff0ccd65f
--- /dev/null
+++ b/drivers/regulator/sky81452-regulator.c
@@ -0,0 +1,130 @@
+/*
+ * sky81452-regulator.c SKY81452 regulator driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/* registers */
+#define SKY81452_REG1 0x01
+#define SKY81452_REG3 0x03
+
+/* bit mask */
+#define SKY81452_LEN 0x40
+#define SKY81452_LOUT 0x1F
+
+static struct regulator_ops sky81452_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
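+/* LOUT: 4.5V - 8V in 250mV steps (sel 0-14), then 9V - 25V in 1V steps (sel 15-31) */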
+static const struct regulator_linear_range sky81452_reg_ranges[] = {
+ REGULATOR_LINEAR_RANGE(4500000, 0, 14, 250000),
+ REGULATOR_LINEAR_RANGE(9000000, 15, 31, 1000000),
+};
+
+static const struct regulator_desc sky81452_reg = {
+ .name = "LOUT",
+ .ops = &sky81452_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = SKY81452_LOUT + 1,
+ .linear_ranges = sky81452_reg_ranges,
+ .n_linear_ranges = ARRAY_SIZE(sky81452_reg_ranges),
+ .vsel_reg = SKY81452_REG3,
+ .vsel_mask = SKY81452_LOUT,
+ .enable_reg = SKY81452_REG1,
+ .enable_mask = SKY81452_LEN,
+};
+
+#ifdef CONFIG_OF
+static struct regulator_init_data *sky81452_reg_parse_dt(struct device *dev)
+{
+ struct regulator_init_data *init_data;
+ struct device_node *np;
+
+ np = of_get_child_by_name(dev->parent->of_node, "regulator");
+ if (unlikely(!np)) {
+ dev_err(dev, "regulator node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ init_data = of_get_regulator_init_data(dev, np);
+
+ of_node_put(np);
+ return init_data;
+}
+#else
+static struct regulator_init_data *sky81452_reg_parse_dt(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int sky81452_reg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct regulator_init_data *init_data = dev_get_platdata(dev);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ if (!init_data) {
+ init_data = sky81452_reg_parse_dt(dev);
+ if (IS_ERR(init_data))
+ return PTR_ERR(init_data);
+ }
+
+ config.dev = dev;
+ config.init_data = init_data;
+ config.of_node = dev->of_node;
+ config.regmap = dev_get_drvdata(dev->parent);
+
+ rdev = devm_regulator_register(dev, &sky81452_reg, &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static struct platform_driver sky81452_reg_driver = {
+ .driver = {
+ .name = "sky81452-regulator",
+ },
+ .probe = sky81452_reg_probe,
+};
+
+module_platform_driver(sky81452_reg_driver);
+
+MODULE_DESCRIPTION("Skyworks SKY81452 Regulator driver");
+MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@skyworksinc.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
diff --git a/drivers/regulator/st-pwm.c b/drivers/regulator/st-pwm.c
deleted file mode 100644
index 5ea78df449f8..000000000000
--- a/drivers/regulator/st-pwm.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Regulator driver for ST's PWM Regulators
- *
- * Copyright (C) 2014 - STMicroelectronics Inc.
- *
- * Author: Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pwm.h>
-
-#define ST_PWM_REG_PERIOD 8448
-
-struct st_pwm_regulator_pdata {
- const struct regulator_desc *desc;
- struct st_pwm_voltages *duty_cycle_table;
-};
-
-struct st_pwm_regulator_data {
- const struct st_pwm_regulator_pdata *pdata;
- struct pwm_device *pwm;
- bool enabled;
- int state;
-};
-
-struct st_pwm_voltages {
- unsigned int uV;
- unsigned int dutycycle;
-};
-
-static int st_pwm_regulator_get_voltage_sel(struct regulator_dev *dev)
-{
- struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
-
- return drvdata->state;
-}
-
-static int st_pwm_regulator_set_voltage_sel(struct regulator_dev *dev,
- unsigned selector)
-{
- struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
- int dutycycle;
- int ret;
-
- dutycycle = (ST_PWM_REG_PERIOD / 100) *
- drvdata->pdata->duty_cycle_table[selector].dutycycle;
-
- ret = pwm_config(drvdata->pwm, dutycycle, ST_PWM_REG_PERIOD);
- if (ret) {
- dev_err(&dev->dev, "Failed to configure PWM\n");
- return ret;
- }
-
- drvdata->state = selector;
-
- if (!drvdata->enabled) {
- ret = pwm_enable(drvdata->pwm);
- if (ret) {
- dev_err(&dev->dev, "Failed to enable PWM\n");
- return ret;
- }
- drvdata->enabled = true;
- }
-
- return 0;
-}
-
-static int st_pwm_regulator_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
-
- if (selector >= dev->desc->n_voltages)
- return -EINVAL;
-
- return drvdata->pdata->duty_cycle_table[selector].uV;
-}
-
-static struct regulator_ops st_pwm_regulator_voltage_ops = {
- .set_voltage_sel = st_pwm_regulator_set_voltage_sel,
- .get_voltage_sel = st_pwm_regulator_get_voltage_sel,
- .list_voltage = st_pwm_regulator_list_voltage,
- .map_voltage = regulator_map_voltage_iterate,
-};
-
-static struct st_pwm_voltages b2105_duty_cycle_table[] = {
- { .uV = 1114000, .dutycycle = 0, },
- { .uV = 1095000, .dutycycle = 10, },
- { .uV = 1076000, .dutycycle = 20, },
- { .uV = 1056000, .dutycycle = 30, },
- { .uV = 1036000, .dutycycle = 40, },
- { .uV = 1016000, .dutycycle = 50, },
- /* WARNING: Values above 50% duty-cycle cause boot failures. */
-};
-
-static const struct regulator_desc b2105_desc = {
- .name = "b2105-pwm-regulator",
- .ops = &st_pwm_regulator_voltage_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(b2105_duty_cycle_table),
- .supply_name = "pwm",
-};
-
-static const struct st_pwm_regulator_pdata b2105_info = {
- .desc = &b2105_desc,
- .duty_cycle_table = b2105_duty_cycle_table,
-};
-
-static const struct of_device_id st_pwm_of_match[] = {
- { .compatible = "st,b2105-pwm-regulator", .data = &b2105_info, },
- { },
-};
-MODULE_DEVICE_TABLE(of, st_pwm_of_match);
-
-static int st_pwm_regulator_probe(struct platform_device *pdev)
-{
- struct st_pwm_regulator_data *drvdata;
- struct regulator_dev *regulator;
- struct regulator_config config = { };
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_match;
-
- if (!np) {
- dev_err(&pdev->dev, "Device Tree node missing\n");
- return -EINVAL;
- }
-
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- of_match = of_match_device(st_pwm_of_match, &pdev->dev);
- if (!of_match) {
- dev_err(&pdev->dev, "failed to match of device\n");
- return -ENODEV;
- }
- drvdata->pdata = of_match->data;
-
- config.init_data = of_get_regulator_init_data(&pdev->dev, np);
- if (!config.init_data)
- return -ENOMEM;
-
- config.of_node = np;
- config.dev = &pdev->dev;
- config.driver_data = drvdata;
-
- drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
- if (IS_ERR(drvdata->pwm)) {
- dev_err(&pdev->dev, "Failed to get PWM\n");
- return PTR_ERR(drvdata->pwm);
- }
-
- regulator = devm_regulator_register(&pdev->dev,
- drvdata->pdata->desc, &config);
- if (IS_ERR(regulator)) {
- dev_err(&pdev->dev, "Failed to register regulator %s\n",
- drvdata->pdata->desc->name);
- return PTR_ERR(regulator);
- }
-
- return 0;
-}
-
-static struct platform_driver st_pwm_regulator_driver = {
- .driver = {
- .name = "st-pwm-regulator",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(st_pwm_of_match),
- },
- .probe = st_pwm_regulator_probe,
-};
-
-module_platform_driver(st_pwm_regulator_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
-MODULE_DESCRIPTION("ST PWM Regulator Driver");
-MODULE_ALIAS("platform:st_pwm-regulator");
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 3ef67a86115c..7380af8bd50d 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -211,9 +211,6 @@ static int tps_65023_probe(struct i2c_client *client,
int i;
int error;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
-
/**
* init_data points to array of regulator_init structures
* coming from the board-evm file.
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index d58db72a63b0..adbe4fc5cf07 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -27,10 +27,13 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65217.h>
-#define TPS65217_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _em, _t, _lr, _nlr) \
+#define TPS65217_REGULATOR(_name, _id, _of_match, _ops, _n, _vr, _vm, _em, \
+ _t, _lr, _nlr) \
{ \
.name = _name, \
.id = _id, \
+ .of_match = of_match_ptr(_of_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &_ops, \
.n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
@@ -138,87 +141,40 @@ static struct regulator_ops tps65217_pmic_ldo1_ops = {
};
static const struct regulator_desc regulators[] = {
- TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64,
- TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK,
- TPS65217_ENABLE_DC1_EN, NULL, tps65217_uv1_ranges,
- 2), /* DCDC1 voltage range: 900000 ~ 1800000 */
- TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64,
- TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK,
- TPS65217_ENABLE_DC2_EN, NULL, tps65217_uv1_ranges,
+ TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, "dcdc1",
+ tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC1,
+ TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC1_EN,
+ NULL, tps65217_uv1_ranges, 2),
+ TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, "dcdc2",
+ tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC2,
+ TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC2_EN,
+ NULL, tps65217_uv1_ranges,
ARRAY_SIZE(tps65217_uv1_ranges)),
- TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64,
- TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK,
- TPS65217_ENABLE_DC3_EN, NULL, tps65217_uv1_ranges,
- 1), /* DCDC3 voltage range: 900000 ~ 1500000 */
- TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16,
- TPS65217_REG_DEFLDO1, TPS65217_DEFLDO1_LDO1_MASK,
- TPS65217_ENABLE_LDO1_EN, LDO1_VSEL_table, NULL, 0),
- TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64,
- TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK,
- TPS65217_ENABLE_LDO2_EN, NULL, tps65217_uv1_ranges,
+ TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, "dcdc3",
+ tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC3,
+ TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC3_EN,
+ NULL, tps65217_uv1_ranges, 1),
+ TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, "ldo1",
+ tps65217_pmic_ldo1_ops, 16, TPS65217_REG_DEFLDO1,
+ TPS65217_DEFLDO1_LDO1_MASK, TPS65217_ENABLE_LDO1_EN,
+ LDO1_VSEL_table, NULL, 0),
+ TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, "ldo2", tps65217_pmic_ops,
+ 64, TPS65217_REG_DEFLDO2,
+ TPS65217_DEFLDO2_LDO2_MASK, TPS65217_ENABLE_LDO2_EN,
+ NULL, tps65217_uv1_ranges,
ARRAY_SIZE(tps65217_uv1_ranges)),
- TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32,
- TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK,
+ TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, "ldo3", tps65217_pmic_ops,
+ 32, TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK,
TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
NULL, tps65217_uv2_ranges,
ARRAY_SIZE(tps65217_uv2_ranges)),
- TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32,
- TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK,
+ TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, "ldo4", tps65217_pmic_ops,
+ 32, TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK,
TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
NULL, tps65217_uv2_ranges,
ARRAY_SIZE(tps65217_uv2_ranges)),
};
-#ifdef CONFIG_OF
-static struct of_regulator_match reg_matches[] = {
- { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 },
- { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 },
- { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 },
- { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 },
- { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 },
- { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 },
- { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 },
-};
-
-static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
-{
- struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
- struct device_node *node = tps->dev->of_node;
- struct tps65217_board *pdata;
- struct device_node *regs;
- int i, count;
-
- regs = of_get_child_by_name(node, "regulators");
- if (!regs)
- return NULL;
-
- count = of_regulator_match(&pdev->dev, regs, reg_matches,
- TPS65217_NUM_REGULATOR);
- of_node_put(regs);
- if ((count < 0) || (count > TPS65217_NUM_REGULATOR))
- return NULL;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- for (i = 0; i < count; i++) {
- if (!reg_matches[i].of_node)
- continue;
-
- pdata->tps65217_init_data[i] = reg_matches[i].init_data;
- pdata->of_node[i] = reg_matches[i].of_node;
- }
-
- return pdata;
-}
-#else
-static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
-
static int tps65217_regulator_probe(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
@@ -227,14 +183,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
struct regulator_config config = { };
int i;
- if (tps->dev->of_node)
- pdata = tps65217_parse_dt(pdev);
-
- if (!pdata) {
- dev_err(&pdev->dev, "Platform data not found\n");
- return -EINVAL;
- }
-
if (tps65217_chip_id(tps) != TPS65217) {
dev_err(&pdev->dev, "Invalid tps chip version\n");
return -ENODEV;
@@ -245,11 +193,10 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
/* Register the regulators */
config.dev = tps->dev;
- config.init_data = pdata->tps65217_init_data[i];
+ if (pdata)
+ config.init_data = pdata->tps65217_init_data[i];
config.driver_data = tps;
config.regmap = tps->regmap;
- if (tps->dev->of_node)
- config.of_node = pdata->of_node[i];
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
&config);
@@ -259,6 +206,7 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
return PTR_ERR(rdev);
}
}
+
return 0;
}
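
The TPS65217_REGULATOR() macro above now fills in .of_match and .regulators_node, so the regulator core can locate each regulator's init_data under the parent device's "regulators" device-tree node by itself; that is what makes the hand-rolled tps65217_parse_dt()/of_regulator_match() code removable. A minimal sketch of a descriptor relying on this mechanism (everything other than the regulator_desc fields is hypothetical):

	static const struct regulator_desc example_ldo_desc = {
		.name		 = "EXAMPLE_LDO",
		.of_match	 = of_match_ptr("ldo1"),
		.regulators_node = of_match_ptr("regulators"),
		.ops		 = &example_ldo_ops,	/* hypothetical ops table */
		.type		 = REGULATOR_VOLTAGE,
		.owner		 = THIS_MODULE,
	};

With these fields set, devm_regulator_register() matches the "ldo1" child of the "regulators" node and derives the constraints from DT when config.init_data is left NULL, which is exactly what the probe() change above relies on.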
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index e584c998b55f..18fc991175bc 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1047,7 +1047,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
*tps65910_reg_matches = matches;
for (idx = 0; idx < count; idx++) {
- if (!matches[idx].init_data || !matches[idx].of_node)
+ if (!matches[idx].of_node)
continue;
pmic_plat_data->tps65910_pmic_init_data[idx] =
@@ -1077,7 +1077,6 @@ static int tps65910_probe(struct platform_device *pdev)
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct tps_info *info;
- struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
struct tps65910_reg *pmic;
struct tps65910_board *pmic_plat_data;
@@ -1140,14 +1139,6 @@ static int tps65910_probe(struct platform_device *pdev)
for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
i++, info++) {
-
- reg_data = pmic_plat_data->tps65910_pmic_init_data[i];
-
- /* Regulator API handles empty constraints but not NULL
- * constraints */
- if (!reg_data)
- continue;
-
/* Register the regulators */
pmic->info[i] = info;
@@ -1199,7 +1190,7 @@ static int tps65910_probe(struct platform_device *pdev)
pmic->desc[i].enable_mask = TPS65910_SUPPLY_STATE_ENABLED;
config.dev = tps65910->dev;
- config.init_data = reg_data;
+ config.init_data = pmic_plat_data->tps65910_pmic_init_data[i];
config.driver_data = pmic;
config.regmap = tps65910->regmap;
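
The tps65910 hunks above work because the regulator core accepts a NULL init_data and applies empty constraints in that case; regulators that are described in the device tree without explicit constraints are therefore still registered rather than silently skipped, which is what the deleted reg_data check used to cause.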
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 8225b89de810..c384fec6d173 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -232,6 +232,7 @@ static struct platform_driver efi_rtc_driver = {
module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe);
+MODULE_ALIAS("platform:rtc-efi");
MODULE_AUTHOR("dann frazier <dannf@hp.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("EFI RTC driver");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index bd85fb4978e0..e85e64a07d02 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -45,6 +45,17 @@ config SCSI_NETLINK
default n
depends on NET
+config SCSI_MQ_DEFAULT
+ bool "SCSI: use blk-mq I/O path by default"
+ depends on SCSI
+ ---help---
+ This option enables the new blk-mq based I/O path for SCSI
+ devices by default. With this option, the scsi_mod.use_blk_mq
+ module/boot option defaults to Y; without it, it defaults to N,
+ but it can still be overridden either way.
+
+ If unsure, say N.
+
config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
depends on SCSI && PROC_FS
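
Whichever way CONFIG_SCSI_MQ_DEFAULT is set, the result is only a default: as the help text above notes, the I/O path can still be selected at boot or module load with scsi_mod.use_blk_mq=Y or scsi_mod.use_blk_mq=N.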
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 63f576c9300a..a759cb2d4b15 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1152,6 +1152,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->irq = pdev->irq;
shost->unique_id = unique_id;
shost->max_cmd_len = 16;
+ shost->use_cmd_list = 1;
aac = (struct aac_dev *)shost->hostdata;
aac->base_start = pci_resource_start(pdev, 0);
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 6739069477de..3b3d599103f8 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -70,7 +70,7 @@ config AIC79XX_DEBUG_MASK
default "0"
help
Bit mask of debug options that is only valid if the
- CONFIG_AIC79XX_DEBUG_ENBLE option is enabled. The bits in this mask
+ CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask
are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the
variable ahd_debug in that file to find them.
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 77b26f5b9c33..3bcaaac0ae4b 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,13 +45,14 @@
#include <linux/interrupt.h>
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
-#define ARCMSR_MAX_OUTSTANDING_CMD 256
#ifdef CONFIG_XEN
#define ARCMSR_MAX_FREECCB_NUM 160
+#define ARCMSR_MAX_OUTSTANDING_CMD 155
#else
#define ARCMSR_MAX_FREECCB_NUM 320
+#define ARCMSR_MAX_OUTSTANDING_CMD 255
#endif
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05"
+#define ARCMSR_DRIVER_VERSION "v1.30.00.04-20140919"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -62,11 +63,17 @@ struct device_attribute;
#define ARCMSR_MAX_QBUFFER 4096
#define ARCMSR_DEFAULT_SG_ENTRIES 38
#define ARCMSR_MAX_HBB_POSTQUEUE 264
+#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
+#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
#define ARCMSR_CDB_SG_PAGE_LENGTH 256
+#define ARCMST_NUM_MSIX_VECTORS 4
#ifndef PCI_DEVICE_ID_ARECA_1880
#define PCI_DEVICE_ID_ARECA_1880 0x1880
#endif
+#ifndef PCI_DEVICE_ID_ARECA_1214
+ #define PCI_DEVICE_ID_ARECA_1214 0x1214
+#endif
/*
**********************************************************************************
**
@@ -100,10 +107,11 @@ struct CMD_MESSAGE
** IOP Message Transfer Data for user space
*******************************************************************************
*/
+#define ARCMSR_API_DATA_BUFLEN 1032
struct CMD_MESSAGE_FIELD
{
struct CMD_MESSAGE cmdmessage;
- uint8_t messagedatabuffer[1032];
+ uint8_t messagedatabuffer[ARCMSR_API_DATA_BUFLEN];
};
/* IOP message transfer */
#define ARCMSR_MESSAGE_FAIL 0x0001
@@ -337,6 +345,56 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000
/*
*******************************************************************************
+** SPEC. for Areca Type D adapter
+*******************************************************************************
+*/
+#define ARCMSR_ARC1214_CHIP_ID 0x00004
+#define ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION 0x00008
+#define ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK 0x00034
+#define ARCMSR_ARC1214_SAMPLE_RESET 0x00100
+#define ARCMSR_ARC1214_RESET_REQUEST 0x00108
+#define ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS 0x00200
+#define ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE 0x0020C
+#define ARCMSR_ARC1214_INBOUND_MESSAGE0 0x00400
+#define ARCMSR_ARC1214_INBOUND_MESSAGE1 0x00404
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE0 0x00420
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE1 0x00424
+#define ARCMSR_ARC1214_INBOUND_DOORBELL 0x00460
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL 0x00480
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE 0x00484
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW 0x01000
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH 0x01004
+#define ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER 0x01018
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW 0x01060
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH 0x01064
+#define ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER 0x0106C
+#define ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER 0x01070
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE 0x01088
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE 0x0108C
+#define ARCMSR_ARC1214_MESSAGE_WBUFFER 0x02000
+#define ARCMSR_ARC1214_MESSAGE_RBUFFER 0x02100
+#define ARCMSR_ARC1214_MESSAGE_RWBUFFER 0x02200
+/* Host Interrupt Mask */
+#define ARCMSR_ARC1214_ALL_INT_ENABLE 0x00001010
+#define ARCMSR_ARC1214_ALL_INT_DISABLE 0x00000000
+/* Host Interrupt Status */
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR 0x00001000
+#define ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR 0x00000010
+/* DoorBell*/
+#define ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY 0x00000001
+#define ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ 0x00000002
+/*inbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK 0x00000001
+/*outbound DATA WRITE isr door bell clear*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK 0x00000002
+/*outbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE 0x02000000
+/*outbound message cmd isr door bell clear*/
+/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
+#define ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK 0x80000000
+#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
+/*
+*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
*******************************************************************************
*/
@@ -357,7 +415,7 @@ struct ARCMSR_CDB
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
uint8_t msgPages;
- uint32_t Context;
+ uint32_t msgContext;
uint32_t DataLength;
uint8_t Cdb[16];
uint8_t DeviceStatus;
@@ -494,6 +552,56 @@ struct MessageUnit_C{
uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
};
/*
+*********************************************************************
+** Messaging Unit (MU) of Type D processor
+*********************************************************************
+*/
+struct InBound_SRB {
+ uint32_t addressLow; /* pointer to SRB block */
+ uint32_t addressHigh;
+ uint32_t length; /* in DWORDs */
+ uint32_t reserved0;
+};
+
+struct OutBound_SRB {
+ uint32_t addressLow; /* pointer to SRB block */
+ uint32_t addressHigh;
+};
+
+struct MessageUnit_D {
+ struct InBound_SRB post_qbuffer[ARCMSR_MAX_ARC1214_POSTQUEUE];
+ volatile struct OutBound_SRB
+ done_qbuffer[ARCMSR_MAX_ARC1214_DONEQUEUE];
+ u16 postq_index;
+ volatile u16 doneq_index;
+ u32 __iomem *chip_id; /* 0x00004 */
+ u32 __iomem *cpu_mem_config; /* 0x00008 */
+ u32 __iomem *i2o_host_interrupt_mask; /* 0x00034 */
+ u32 __iomem *sample_at_reset; /* 0x00100 */
+ u32 __iomem *reset_request; /* 0x00108 */
+ u32 __iomem *host_int_status; /* 0x00200 */
+ u32 __iomem *pcief0_int_enable; /* 0x0020C */
+ u32 __iomem *inbound_msgaddr0; /* 0x00400 */
+ u32 __iomem *inbound_msgaddr1; /* 0x00404 */
+ u32 __iomem *outbound_msgaddr0; /* 0x00420 */
+ u32 __iomem *outbound_msgaddr1; /* 0x00424 */
+ u32 __iomem *inbound_doorbell; /* 0x00460 */
+ u32 __iomem *outbound_doorbell; /* 0x00480 */
+ u32 __iomem *outbound_doorbell_enable; /* 0x00484 */
+ u32 __iomem *inboundlist_base_low; /* 0x01000 */
+ u32 __iomem *inboundlist_base_high; /* 0x01004 */
+ u32 __iomem *inboundlist_write_pointer; /* 0x01018 */
+ u32 __iomem *outboundlist_base_low; /* 0x01060 */
+ u32 __iomem *outboundlist_base_high; /* 0x01064 */
+ u32 __iomem *outboundlist_copy_pointer; /* 0x0106C */
+ u32 __iomem *outboundlist_read_pointer; /* 0x01070 0x01072 */
+ u32 __iomem *outboundlist_interrupt_cause; /* 0x1088 */
+ u32 __iomem *outboundlist_interrupt_enable; /* 0x108C */
+ u32 __iomem *message_wbuffer; /* 0x2000 */
+ u32 __iomem *message_rbuffer; /* 0x2100 */
+ u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
+};
+/*
*******************************************************************************
** Adapter Control Block
*******************************************************************************
@@ -505,19 +613,26 @@ struct AdapterControlBlock
#define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
#define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
#define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
+ u32 roundup_ccbsize;
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
+ struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
/* Offset is used in making arc cdb physical to virtual calculations */
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
uint32_t reg_mu_acc_handle0;
spinlock_t eh_lock;
spinlock_t ccblist_lock;
+ spinlock_t postq_lock;
+ spinlock_t doneq_lock;
+ spinlock_t rqbuffer_lock;
+ spinlock_t wqbuffer_lock;
union {
struct MessageUnit_A __iomem *pmuA;
struct MessageUnit_B *pmuB;
struct MessageUnit_C __iomem *pmuC;
+ struct MessageUnit_D *pmuD;
};
/* message unit ATU inbound base address0 */
void __iomem *mem_base0;
@@ -544,6 +659,8 @@ struct AdapterControlBlock
/* iop init */
#define ACB_F_ABORT 0x0200
#define ACB_F_FIRMWARE_TRAP 0x0400
+ #define ACB_F_MSI_ENABLED 0x1000
+ #define ACB_F_MSIX_ENABLED 0x2000
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
struct list_head ccb_free_list;
@@ -557,19 +674,20 @@ struct AdapterControlBlock
/* dma_coherent used for memory free */
dma_addr_t dma_coherent_handle;
/* dma_coherent_handle used for memory free */
- dma_addr_t dma_coherent_handle_hbb_mu;
+ dma_addr_t dma_coherent_handle2;
+ void *dma_coherent2;
unsigned int uncache_size;
uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for read from 80331 */
- int32_t rqbuf_firstindex;
+ int32_t rqbuf_getIndex;
/* first of read buffer */
- int32_t rqbuf_lastindex;
+ int32_t rqbuf_putIndex;
/* last of read buffer */
uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for write to 80331 */
- int32_t wqbuf_firstindex;
+ int32_t wqbuf_getIndex;
/* first of write buffer */
- int32_t wqbuf_lastindex;
+ int32_t wqbuf_putIndex;
/* last of write buffer */
uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
/* id0 ..... id15, lun0...lun7 */
@@ -594,6 +712,8 @@ struct AdapterControlBlock
#define FW_DEADLOCK 0x0010
atomic_t rq_map_token;
atomic_t ante_token_value;
+ uint32_t maxOutstanding;
+ int msix_vector_count;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
@@ -606,7 +726,7 @@ struct CommandControlBlock{
struct list_head list; /*x32: 8byte, x64: 16byte*/
struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
- uint32_t cdb_phyaddr_pattern; /*x32: 4byte, x64: 4byte*/
+ uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
#define CCB_FLAG_READ 0x0000
@@ -684,8 +804,10 @@ struct SENSE_DATA
#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
-extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *);
-extern void arcmsr_iop_message_read(struct AdapterControlBlock *);
+extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
+extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
+ struct QBUFFER __iomem *);
+extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *);
extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
extern struct device_attribute *arcmsr_host_attrs[];
extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
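
The new MessageUnit_D structure above does not overlay the ARC1214 register file directly; it carries one __iomem pointer per register, which gets pointed at the ioremapped BAR0 using the ARCMSR_ARC1214_* offsets. A sketch of how such a table is wired up (illustrative only; the function name is hypothetical and the real initialisation lives in arcmsr_hba.c):

	static void example_hbaD_assign_regs(struct AdapterControlBlock *acb)
	{
		struct MessageUnit_D *reg = acb->pmuD;
		void __iomem *base = acb->mem_base0;	/* BAR0, see arcmsr_remap_pciregion() */

		reg->chip_id                   = base + ARCMSR_ARC1214_CHIP_ID;
		reg->inbound_msgaddr0          = base + ARCMSR_ARC1214_INBOUND_MESSAGE0;
		reg->outbound_msgaddr0         = base + ARCMSR_ARC1214_OUTBOUND_MESSAGE0;
		reg->outbound_doorbell         = base + ARCMSR_ARC1214_OUTBOUND_DOORBELL;
		reg->inboundlist_write_pointer = base + ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
		reg->outboundlist_read_pointer = base + ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER;
		/* ...and so on for the remaining ARCMSR_ARC1214_* offsets... */
	}

All accesses then go through readl()/writel() on these pointers, as the Type D code paths in arcmsr_hba.c below do.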
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index acdae33de521..9c86481f779f 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -50,6 +50,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/circ_buf.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -68,42 +69,42 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
struct device *dev = container_of(kobj,struct device,kobj);
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
- uint8_t *pQbuffer,*ptmpQbuffer;
+ uint8_t *ptmpQbuffer;
int32_t allxfer_len = 0;
+ unsigned long flags;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/* do message unit read. */
ptmpQbuffer = (uint8_t *)buf;
- while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
- && (allxfer_len < 1031)) {
- pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
- memcpy(ptmpQbuffer, pQbuffer, 1);
- acb->rqbuf_firstindex++;
- acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
- ptmpQbuffer++;
- allxfer_len++;
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+ unsigned int tail = acb->rqbuf_getIndex;
+ unsigned int head = acb->rqbuf_putIndex;
+ unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
+ allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+ if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+ allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+ if (allxfer_len <= cnt_to_end)
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+ else {
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+ memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+ }
+ acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
}
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
struct QBUFFER __iomem *prbuffer;
- uint8_t __iomem *iop_data;
- int32_t iop_len;
-
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
- iop_data = prbuffer->data;
- iop_len = readl(&prbuffer->data_len);
- while (iop_len > 0) {
- acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
- acb->rqbuf_lastindex++;
- acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
- iop_data++;
- iop_len--;
- }
- arcmsr_iop_message_read(acb);
+ if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+ acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
}
- return (allxfer_len);
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ return allxfer_len;
}
static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
@@ -115,43 +116,42 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
struct device *dev = container_of(kobj,struct device,kobj);
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
- int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
+ int32_t user_len, cnt2end;
uint8_t *pQbuffer, *ptmpuserbuffer;
+ unsigned long flags;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (count > 1032)
+ if (count > ARCMSR_API_DATA_BUFLEN)
return -EINVAL;
/* do message unit write. */
ptmpuserbuffer = (uint8_t *)buf;
user_len = (int32_t)count;
- wqbuf_lastindex = acb->wqbuf_lastindex;
- wqbuf_firstindex = acb->wqbuf_firstindex;
- if (wqbuf_lastindex != wqbuf_firstindex) {
- arcmsr_post_ioctldata2iop(acb);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
+ arcmsr_write_ioctldata2iop(acb);
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
return 0; /*need retry*/
} else {
- my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
- &(ARCMSR_MAX_QBUFFER - 1);
- if (my_empty_len >= user_len) {
- while (user_len > 0) {
- pQbuffer =
- &acb->wqbuffer[acb->wqbuf_lastindex];
- memcpy(pQbuffer, ptmpuserbuffer, 1);
- acb->wqbuf_lastindex++;
- acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
- ptmpuserbuffer++;
- user_len--;
- }
- if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
- acb->acb_flags &=
- ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
- arcmsr_post_ioctldata2iop(acb);
- }
- return count;
- } else {
- return 0; /*need retry*/
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+ cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+ if (user_len > cnt2end) {
+ memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+ ptmpuserbuffer += cnt2end;
+ user_len -= cnt2end;
+ acb->wqbuf_putIndex = 0;
+ pQbuffer = acb->wqbuffer;
+ }
+ memcpy(pQbuffer, ptmpuserbuffer, user_len);
+ acb->wqbuf_putIndex += user_len;
+ acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
+ acb->acb_flags &=
+ ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
+ arcmsr_write_ioctldata2iop(acb);
}
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ return count;
}
}
@@ -165,22 +165,24 @@ static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
uint8_t *pQbuffer;
+ unsigned long flags;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
- acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
- arcmsr_iop_message_read(acb);
- }
+ arcmsr_clear_iop2drv_rqueue_buffer(acb);
acb->acb_flags |=
(ACB_F_MESSAGE_WQBUFFER_CLEARED
| ACB_F_MESSAGE_RQBUFFER_CLEARED
| ACB_F_MESSAGE_WQBUFFER_READED);
- acb->rqbuf_firstindex = 0;
- acb->rqbuf_lastindex = 0;
- acb->wqbuf_firstindex = 0;
- acb->wqbuf_lastindex = 0;
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->wqbuf_getIndex = 0;
+ acb->wqbuf_putIndex = 0;
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
pQbuffer = acb->rqbuffer;
memset(pQbuffer, 0, sizeof (struct QBUFFER));
pQbuffer = acb->wqbuffer;
@@ -193,7 +195,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
.name = "mu_read",
.mode = S_IRUSR ,
},
- .size = 1032,
+ .size = ARCMSR_API_DATA_BUFLEN,
.read = arcmsr_sysfs_iop_message_read,
};
@@ -202,7 +204,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
.name = "mu_write",
.mode = S_IWUSR,
},
- .size = 1032,
+ .size = ARCMSR_API_DATA_BUFLEN,
.write = arcmsr_sysfs_iop_message_write,
};
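
The rewritten mu_read path above replaces the old byte-at-a-time drain of rqbuffer[] with the standard <linux/circ_buf.h> pattern: compute how much data is available, how much of it is contiguous before the wrap point, and copy it in at most two memcpy() calls while holding rqbuffer_lock. A minimal stand-alone sketch of that pattern (variable and function names are not the driver's; the buffer size must be a power of two, which ARCMSR_MAX_QBUFFER is):

	/* Copy up to 'max' bytes out of a circular buffer; returns bytes copied. */
	static int circ_copy_out(uint8_t *dst, const uint8_t *ring, int size,
				 int *tail, int head, int max)
	{
		int len = CIRC_CNT(head, *tail, size);		/* bytes available      */
		int cnt_to_end = CIRC_CNT_TO_END(head, *tail, size); /* bytes before wrap */

		if (len > max)
			len = max;
		if (len <= cnt_to_end) {
			memcpy(dst, ring + *tail, len);		/* one contiguous run   */
		} else {
			memcpy(dst, ring + *tail, cnt_to_end);	/* up to the wrap point */
			memcpy(dst + cnt_to_end, ring, len - cnt_to_end); /* wrapped part */
		}
		*tail = (*tail + len) % size;
		return len;
	}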
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index b13764ca23fd..0b44fb5ee485 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,11 +2,10 @@
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr_hba.c
-** BY : Nick Cheng
-** Description: SCSI RAID Device Driver for
-** ARECA RAID Host adapter
+** BY : Nick Cheng, C.L. Huang
+** Description: SCSI RAID Device Driver for Areca RAID Controller
*******************************************************************************
-** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
+** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
** E-mail: support@areca.com.tw
@@ -59,6 +58,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
+#include <linux/circ_buf.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -70,15 +70,15 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
-MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
+MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
+MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
-wait_queue_head_t wait_q;
+static wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
@@ -89,25 +89,31 @@ static int arcmsr_bios_param(struct scsi_device *sdev,
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
+static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
+static int arcmsr_resume(struct pci_dev *pdev);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
+ u32 intmask_org);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
-static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
-static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(unsigned long pacb);
-static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
-static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
+static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
int queue_depth, int reason)
{
@@ -122,15 +128,14 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
static struct scsi_host_template arcmsr_scsi_host_template = {
.module = THIS_MODULE,
- .name = "ARCMSR ARECA SATA/SAS RAID Controller"
- ARCMSR_DRIVER_VERSION,
+ .name = "Areca SAS/SATA RAID driver",
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
.eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
- .can_queue = ARCMSR_MAX_FREECCB_NUM,
+ .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
.this_id = ARCMSR_SCSI_INITIATOR_ID,
.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
@@ -139,34 +144,59 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
};
+
static struct pci_device_id arcmsr_device_id_table[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
- {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
+ .driver_data = ACB_ADAPTER_TYPE_B},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
+ .driver_data = ACB_ADAPTER_TYPE_B},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
+ .driver_data = ACB_ADAPTER_TYPE_B},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
+ .driver_data = ACB_ADAPTER_TYPE_D},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
+ .driver_data = ACB_ADAPTER_TYPE_C},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
+
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
.id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
+ .suspend = arcmsr_suspend,
+ .resume = arcmsr_resume,
.shutdown = arcmsr_shutdown,
};
/*
@@ -174,16 +204,14 @@ static struct pci_driver arcmsr_pci_driver = {
****************************************************************************
*/
-static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
+static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A:
- case ACB_ADAPTER_TYPE_C:
+ case ACB_ADAPTER_TYPE_B:
+ case ACB_ADAPTER_TYPE_D: {
+ dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+ acb->dma_coherent2, acb->dma_coherent_handle2);
break;
- case ACB_ADAPTER_TYPE_B:{
- dma_free_coherent(&acb->pdev->dev,
- sizeof(struct MessageUnit_B),
- acb->pmuB, acb->dma_coherent_handle_hbb_mu);
}
}
}
@@ -229,6 +257,25 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
}
break;
}
+ case ACB_ADAPTER_TYPE_D: {
+ void __iomem *mem_base0;
+ unsigned long addr, range, flags;
+
+ addr = (unsigned long)pci_resource_start(pdev, 0);
+ range = pci_resource_len(pdev, 0);
+ flags = pci_resource_flags(pdev, 0);
+ if (flags & IORESOURCE_CACHEABLE)
+ mem_base0 = ioremap(addr, range);
+ else
+ mem_base0 = ioremap_nocache(addr, range);
+ if (!mem_base0) {
+ pr_notice("arcmsr%d: memory mapping region fail\n",
+ acb->host->host_no);
+ return false;
+ }
+ acb->mem_base0 = mem_base0;
+ break;
+ }
}
return true;
}
@@ -249,6 +296,10 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_C:{
iounmap(acb->pmuC);
}
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ iounmap(acb->mem_base0);
+ break;
}
}
@@ -289,27 +340,7 @@ static int arcmsr_bios_param(struct scsi_device *sdev,
return 0;
}
-static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
-{
- struct pci_dev *pdev = acb->pdev;
- u16 dev_id;
- pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
- acb->dev_id = dev_id;
- switch (dev_id) {
- case 0x1880: {
- acb->adapter_type = ACB_ADAPTER_TYPE_C;
- }
- break;
- case 0x1201: {
- acb->adapter_type = ACB_ADAPTER_TYPE_B;
- }
- break;
-
- default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
- }
-}
-
-static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
int i;
@@ -327,7 +358,7 @@ static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
return false;
}
-static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
int i;
@@ -347,9 +378,9 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
return false;
}
-static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
+static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
- struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+ struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
int i;
for (i = 0; i < 2000; i++) {
@@ -365,13 +396,30 @@ static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
return false;
}
-static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *reg = pACB->pmuD;
+ int i;
+
+ for (i = 0; i < 2000; i++) {
+ if (readl(reg->outbound_doorbell)
+ & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+ reg->outbound_doorbell);
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+ return false;
+}
+
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
int retry_count = 30;
writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
do {
- if (arcmsr_hba_wait_msgint_ready(acb))
+ if (arcmsr_hbaA_wait_msgint_ready(acb))
break;
else {
retry_count--;
@@ -381,13 +429,13 @@ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
} while (retry_count != 0);
}
-static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
int retry_count = 30;
writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
do {
- if (arcmsr_hbb_wait_msgint_ready(acb))
+ if (arcmsr_hbaB_wait_msgint_ready(acb))
break;
else {
retry_count--;
@@ -397,14 +445,14 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
} while (retry_count != 0);
}
-static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
{
- struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
do {
- if (arcmsr_hbc_wait_msgint_ready(pACB)) {
+ if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
break;
} else {
retry_count--;
@@ -414,22 +462,44 @@ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
} while (retry_count != 0);
return;
}
+
+static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
+{
+ int retry_count = 15;
+ struct MessageUnit_D *reg = pACB->pmuD;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
+ do {
+ if (arcmsr_hbaD_wait_msgint_ready(pACB))
+ break;
+
+ retry_count--;
+ pr_notice("arcmsr%d: wait 'flush adapter "
+ "cache' timeout, retry count down = %d\n",
+ pACB->host->host_no, retry_count);
+ } while (retry_count != 0);
+}
+
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_flush_hba_cache(acb);
+ arcmsr_hbaA_flush_cache(acb);
}
break;
case ACB_ADAPTER_TYPE_B: {
- arcmsr_flush_hbb_cache(acb);
+ arcmsr_hbaB_flush_cache(acb);
}
break;
case ACB_ADAPTER_TYPE_C: {
- arcmsr_flush_hbc_cache(acb);
+ arcmsr_hbaC_flush_cache(acb);
}
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_flush_cache(acb);
+ break;
}
}
@@ -473,7 +543,16 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
- ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5));
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ case ACB_ADAPTER_TYPE_B:
+ ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
+ break;
+ case ACB_ADAPTER_TYPE_C:
+ case ACB_ADAPTER_TYPE_D:
+ ccb_tmp->cdb_phyaddr = cdb_phyaddr;
+ break;
+ }
acb->pccb_pool[i] = ccb_tmp;
ccb_tmp->acb = acb;
INIT_LIST_HEAD(&ccb_tmp->list);
@@ -486,121 +565,126 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
- struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh);
+ struct AdapterControlBlock *acb = container_of(work,
+ struct AdapterControlBlock, arcmsr_do_message_isr_bh);
+ char *acb_dev_map = (char *)acb->device_map;
+ uint32_t __iomem *signature = NULL;
+ char __iomem *devicemap = NULL;
+ int target, lun;
+ struct scsi_device *psdev;
+ char diff, temp;
+
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- char *acb_dev_map = (char *)acb->device_map;
- uint32_t __iomem *signature = (uint32_t __iomem*) (&reg->message_rwbuffer[0]);
- char __iomem *devicemap = (char __iomem*) (&reg->message_rwbuffer[21]);
- int target, lun;
- struct scsi_device *psdev;
- char diff;
-
- atomic_inc(&acb->rq_map_token);
- if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
- for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
- diff = (*acb_dev_map)^readb(devicemap);
- if (diff != 0) {
- char temp;
- *acb_dev_map = readb(devicemap);
- temp =*acb_dev_map;
- for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
- if((temp & 0x01)==1 && (diff & 0x01) == 1) {
- scsi_add_device(acb->host, 0, target, lun);
- }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
- psdev = scsi_device_lookup(acb->host, 0, target, lun);
- if (psdev != NULL ) {
- scsi_remove_device(psdev);
- scsi_device_put(psdev);
- }
- }
- temp >>= 1;
- diff >>= 1;
- }
- }
- devicemap++;
- acb_dev_map++;
- }
- }
- break;
- }
+ signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
- case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = acb->pmuB;
- char *acb_dev_map = (char *)acb->device_map;
- uint32_t __iomem *signature = (uint32_t __iomem*)(&reg->message_rwbuffer[0]);
- char __iomem *devicemap = (char __iomem*)(&reg->message_rwbuffer[21]);
- int target, lun;
- struct scsi_device *psdev;
- char diff;
-
- atomic_inc(&acb->rq_map_token);
- if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
- for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
- diff = (*acb_dev_map)^readb(devicemap);
- if (diff != 0) {
- char temp;
- *acb_dev_map = readb(devicemap);
- temp =*acb_dev_map;
- for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
- if((temp & 0x01)==1 && (diff & 0x01) == 1) {
- scsi_add_device(acb->host, 0, target, lun);
- }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
- psdev = scsi_device_lookup(acb->host, 0, target, lun);
- if (psdev != NULL ) {
- scsi_remove_device(psdev);
- scsi_device_put(psdev);
- }
- }
- temp >>= 1;
- diff >>= 1;
- }
- }
- devicemap++;
- acb_dev_map++;
- }
- }
- }
+ signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
break;
- case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *reg = acb->pmuC;
- char *acb_dev_map = (char *)acb->device_map;
- uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
- char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
- int target, lun;
- struct scsi_device *psdev;
- char diff;
-
- atomic_inc(&acb->rq_map_token);
- if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
- for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
- diff = (*acb_dev_map)^readb(devicemap);
- if (diff != 0) {
- char temp;
- *acb_dev_map = readb(devicemap);
- temp = *acb_dev_map;
- for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
- if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
- scsi_add_device(acb->host, 0, target, lun);
- } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
- psdev = scsi_device_lookup(acb->host, 0, target, lun);
- if (psdev != NULL) {
- scsi_remove_device(psdev);
- scsi_device_put(psdev);
- }
- }
- temp >>= 1;
- diff >>= 1;
- }
+ }
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
+ }
+ atomic_inc(&acb->rq_map_token);
+ if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
+ return;
+ for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
+ target++) {
+ temp = readb(devicemap);
+ diff = (*acb_dev_map) ^ temp;
+ if (diff != 0) {
+ *acb_dev_map = temp;
+ for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
+ lun++) {
+ if ((diff & 0x01) == 1 &&
+ (temp & 0x01) == 1) {
+ scsi_add_device(acb->host,
+ 0, target, lun);
+ } else if ((diff & 0x01) == 1
+ && (temp & 0x01) == 0) {
+ psdev = scsi_device_lookup(acb->host,
+ 0, target, lun);
+ if (psdev != NULL) {
+ scsi_remove_device(psdev);
+ scsi_device_put(psdev);
}
- devicemap++;
- acb_dev_map++;
}
+ temp >>= 1;
+ diff >>= 1;
}
}
+ devicemap++;
+ acb_dev_map++;
+ }
+}
+
+static int
+arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
+{
+ int i, j, r;
+ struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
+
+ for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
+ entries[i].entry = i;
+ r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
+ if (r < 0)
+ goto msi_int;
+ acb->msix_vector_count = r;
+ for (i = 0; i < r; i++) {
+ if (request_irq(entries[i].vector,
+ arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+ pr_warn("arcmsr%d: request_irq =%d failed!\n",
+ acb->host->host_no, entries[i].vector);
+ for (j = 0 ; j < i ; j++)
+ free_irq(entries[j].vector, acb);
+ pci_disable_msix(pdev);
+ goto msi_int;
+ }
+ acb->entries[i] = entries[i];
+ }
+ acb->acb_flags |= ACB_F_MSIX_ENABLED;
+ pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+ return SUCCESS;
+msi_int:
+ if (pci_enable_msi_exact(pdev, 1) < 0)
+ goto legacy_int;
+ if (request_irq(pdev->irq, arcmsr_do_interrupt,
+ IRQF_SHARED, "arcmsr", acb)) {
+ pr_warn("arcmsr%d: request_irq =%d failed!\n",
+ acb->host->host_no, pdev->irq);
+ pci_disable_msi(pdev);
+ goto legacy_int;
+ }
+ acb->acb_flags |= ACB_F_MSI_ENABLED;
+ pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
+ return SUCCESS;
+legacy_int:
+ if (request_irq(pdev->irq, arcmsr_do_interrupt,
+ IRQF_SHARED, "arcmsr", acb)) {
+ pr_warn("arcmsr%d: request_irq = %d failed!\n",
+ acb->host->host_no, pdev->irq);
+ return FAILED;
}
+ return SUCCESS;
}
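
arcmsr_request_irq() above tries interrupt modes in decreasing order of preference: MSI-X first (pci_enable_msix_range() returns the number of vectors actually granted, between 1 and ARCMST_NUM_MSIX_VECTORS, or a negative errno), then single-vector MSI via pci_enable_msi_exact(), then the legacy shared INTx line. The ACB_F_MSIX_ENABLED / ACB_F_MSI_ENABLED flags it sets are what arcmsr_free_irq() below keys off to undo exactly the mode that was enabled.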
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -637,7 +721,7 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_lun = ARCMSR_MAX_TARGETLUN;
host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
- host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
+ host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
host->this_id = ARCMSR_SCSI_INITIATOR_ID;
host->unique_id = (bus << 8) | dev_fun;
@@ -649,12 +733,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
spin_lock_init(&acb->eh_lock);
spin_lock_init(&acb->ccblist_lock);
+ spin_lock_init(&acb->postq_lock);
+ spin_lock_init(&acb->doneq_lock);
+ spin_lock_init(&acb->rqbuffer_lock);
+ spin_lock_init(&acb->wqbuffer_lock);
acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
ACB_F_MESSAGE_RQBUFFER_CLEARED |
ACB_F_MESSAGE_WQBUFFER_READED);
acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
INIT_LIST_HEAD(&acb->ccb_free_list);
- arcmsr_define_adapter_type(acb);
+ acb->adapter_type = id->driver_data;
error = arcmsr_remap_pciregion(acb);
if(!error){
goto pci_release_regs;
@@ -667,17 +755,13 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(error){
goto free_hbb_mu;
}
- arcmsr_iop_init(acb);
error = scsi_add_host(host, &pdev->dev);
if(error){
- goto RAID_controller_stop;
+ goto free_ccb_pool;
}
- error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
- if(error){
+ if (arcmsr_request_irq(pdev, acb) == FAILED)
goto scsi_host_remove;
- }
- host->irq = pdev->irq;
- scsi_scan_host(host);
+ arcmsr_iop_init(acb);
INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
@@ -689,16 +773,20 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
add_timer(&acb->eternal_timer);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
+ scsi_scan_host(host);
return 0;
out_free_sysfs:
-scsi_host_remove:
- scsi_remove_host(host);
-RAID_controller_stop:
+ del_timer_sync(&acb->eternal_timer);
+ flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
+ arcmsr_free_irq(pdev, acb);
+scsi_host_remove:
+ scsi_remove_host(host);
+free_ccb_pool:
arcmsr_free_ccb_pool(acb);
free_hbb_mu:
- arcmsr_free_hbb_mu(acb);
+ arcmsr_free_mu(acb);
unmap_pci_region:
arcmsr_unmap_pciregion(acb);
pci_release_regs:
@@ -710,75 +798,169 @@ pci_disable_dev:
return -ENODEV;
}
-static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
+static void arcmsr_free_irq(struct pci_dev *pdev,
+ struct AdapterControlBlock *acb)
+{
+ int i;
+
+ if (acb->acb_flags & ACB_F_MSI_ENABLED) {
+ free_irq(pdev->irq, acb);
+ pci_disable_msi(pdev);
+ } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
+ for (i = 0; i < acb->msix_vector_count; i++)
+ free_irq(acb->entries[i].vector, acb);
+ pci_disable_msix(pdev);
+ } else
+ free_irq(pdev->irq, acb);
+}
+
+static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ uint32_t intmask_org;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *)host->hostdata;
+
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_free_irq(pdev, acb);
+ del_timer_sync(&acb->eternal_timer);
+ flush_work(&acb->arcmsr_do_message_isr_bh);
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+ pci_set_drvdata(pdev, host);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int arcmsr_resume(struct pci_dev *pdev)
+{
+ int error;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *)host->hostdata;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
+ pr_warn("%s: pci_enable_device error\n", __func__);
+ return -ENODEV;
+ }
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (error) {
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (error) {
+ pr_warn("scsi%d: No suitable DMA mask available\n",
+ host->host_no);
+ goto controller_unregister;
+ }
+ }
+ pci_set_master(pdev);
+ if (arcmsr_request_irq(pdev, acb) == FAILED)
+ goto controller_stop;
+ arcmsr_iop_init(acb);
+ INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ init_timer(&acb->eternal_timer);
+ acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ acb->eternal_timer.data = (unsigned long) acb;
+ acb->eternal_timer.function = &arcmsr_request_device_map;
+ add_timer(&acb->eternal_timer);
+ return 0;
+controller_stop:
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+controller_unregister:
+ scsi_remove_host(host);
+ arcmsr_free_ccb_pool(acb);
+ arcmsr_unmap_pciregion(acb);
+ pci_release_regions(pdev);
+ scsi_host_put(host);
+ pci_disable_device(pdev);
+ return -ENODEV;
+}
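+
+/* The suspend/resume pair uses the legacy pci_driver .suspend/.resume
+ * callbacks: suspend masks the controller's outbound interrupts, releases
+ * the IRQ(s), stops the background rebuild and flushes the adapter cache
+ * before saving PCI state and powering down; resume restores PCI state,
+ * re-establishes the DMA mask, re-requests the IRQ(s), re-runs
+ * arcmsr_iop_init() and re-arms the periodic device-map timer, mirroring
+ * what arcmsr_probe() does. */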
+
+static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
- if (!arcmsr_hba_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'abort all outstanding command' timeout \n"
+ "arcmsr%d: wait 'abort all outstanding command' timeout\n"
, acb->host->host_no);
return false;
}
return true;
}
-static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'abort all outstanding command' timeout \n"
+ "arcmsr%d: wait 'abort all outstanding command' timeout\n"
, acb->host->host_no);
return false;
}
return true;
}
-static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
+static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
{
- struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'abort all outstanding command' timeout \n"
+ "arcmsr%d: wait 'abort all outstanding command' timeout\n"
, pACB->host->host_no);
return false;
}
return true;
}
+
+static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *reg = pACB->pmuD;
+
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'abort all outstanding "
+ "command' timeout\n", pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- rtnval = arcmsr_abort_hba_allcmd(acb);
+ rtnval = arcmsr_hbaA_abort_allcmd(acb);
}
break;
case ACB_ADAPTER_TYPE_B: {
- rtnval = arcmsr_abort_hbb_allcmd(acb);
+ rtnval = arcmsr_hbaB_abort_allcmd(acb);
}
break;
case ACB_ADAPTER_TYPE_C: {
- rtnval = arcmsr_abort_hbc_allcmd(acb);
+ rtnval = arcmsr_hbaC_abort_allcmd(acb);
}
- }
- return rtnval;
-}
+ break;
-static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
-{
- struct MessageUnit_B *reg = pacb->pmuB;
- writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
- printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
- return false;
+ case ACB_ADAPTER_TYPE_D:
+ rtnval = arcmsr_hbaD_abort_allcmd(acb);
+ break;
}
- return true;
+ return rtnval;
}
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
@@ -837,12 +1019,18 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_C:{
- struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
/* disable all outbound interrupt */
orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
}
break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ /* disable all outbound interrupt */
+ writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
+ }
+ break;
}
return orig_mask;
}
@@ -933,7 +1121,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
int i = 0;
- uint32_t flag_ccb;
+ uint32_t flag_ccb, ccb_cdb_phy;
struct ARCMSR_CDB *pARCMSR_CDB;
bool error;
struct CommandControlBlock *pCCB;
@@ -961,8 +1149,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
/*clear all outbound posted Q*/
writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
- if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
- writel(0, &reg->done_qbuffer[i]);
+ flag_ccb = reg->done_qbuffer[i];
+ if (flag_ccb != 0) {
+ reg->done_qbuffer[i] = 0;
pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -975,11 +1164,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *reg = acb->pmuC;
- struct ARCMSR_CDB *pARCMSR_CDB;
- uint32_t flag_ccb, ccb_cdb_phy;
- bool error;
- struct CommandControlBlock *pCCB;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
/*need to do*/
flag_ccb = readl(&reg->outbound_queueport_low);
@@ -989,9 +1174,54 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
arcmsr_drain_donequeue(acb, pCCB, error);
}
- }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *pmu = acb->pmuD;
+ uint32_t outbound_write_pointer;
+ uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
+ unsigned long flags;
+
+ residual = atomic_read(&acb->ccboutstandingcount);
+ for (i = 0; i < residual; i++) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ outbound_write_pointer =
+ pmu->done_qbuffer[0].addressLow + 1;
+ doneq_index = pmu->doneq_index;
+ if ((doneq_index & 0xFFF) !=
+ (outbound_write_pointer & 0xFFF)) {
+ toggle = doneq_index & 0x4000;
+ index_stripped = (doneq_index & 0xFFF) + 1;
+ index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+ pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+ ((toggle ^ 0x4000) + 1);
+ doneq_index = pmu->doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ addressLow = pmu->done_qbuffer[doneq_index &
+ 0xFFF].addressLow;
+ ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+ pARCMSR_CDB = (struct ARCMSR_CDB *)
+ (acb->vir2phy_offset + ccb_cdb_phy);
+ pCCB = container_of(pARCMSR_CDB,
+ struct CommandControlBlock, arcmsr_cdb);
+ error = (addressLow &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
+ true : false;
+ arcmsr_drain_donequeue(acb, pCCB, error);
+ writel(doneq_index,
+ pmu->outboundlist_read_pointer);
+ } else {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ mdelay(10);
+ }
+ }
+ pmu->postq_index = 0;
+ pmu->doneq_index = 0x40FF;
+ }
+ break;
}
}
+
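[Editor's note] The Type D abort path above walks the done queue by keeping the slot number in the low 12 bits of doneq_index and a wrap marker in bit 0x4000. A minimal standalone sketch of that index update, not part of this patch; DONEQ_DEPTH is an assumed stand-in for ARCMSR_MAX_ARC1214_DONEQUEUE:

#include <stdint.h>

#define DONEQ_DEPTH 0x100	/* assumed stand-in for ARCMSR_MAX_ARC1214_DONEQUEUE */

/* Advance a done-queue index that carries a wrap toggle in bit 14 and the
 * slot number in the low 12 bits, mirroring the hunk above. */
static uint32_t next_doneq_index(uint32_t doneq_index)
{
	uint32_t toggle = doneq_index & 0x4000;
	uint32_t stripped = ((doneq_index & 0xFFF) + 1) % DONEQ_DEPTH;

	/* On wrap, flip the toggle and restart at slot 1, matching the
	 * driver's ((toggle ^ 0x4000) + 1) expression. */
	return stripped ? (stripped | toggle) : ((toggle ^ 0x4000) + 1);
}

The toggle bit lets the driver tell a full queue from an empty one even though producer and consumer indices wrap over the same 12-bit range.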
static void arcmsr_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -1029,9 +1259,9 @@ static void arcmsr_remove(struct pci_dev *pdev)
}
}
}
- free_irq(pdev->irq, acb);
+ arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
- arcmsr_free_hbb_mu(acb);
+ arcmsr_free_mu(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1045,6 +1275,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
+ arcmsr_free_irq(pdev, acb);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -1091,11 +1322,19 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
}
break;
case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *reg = acb->pmuC;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
writel(intmask_org & mask, &reg->host_int_mask);
acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
}
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
+ writel(intmask_org | mask, reg->pcief0_int_enable);
+ break;
+ }
}
}
@@ -1115,7 +1354,7 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
arcmsr_cdb->TargetID = pcmd->device->id;
arcmsr_cdb->LUN = pcmd->device->lun;
arcmsr_cdb->Function = 1;
- arcmsr_cdb->Context = 0;
+ arcmsr_cdb->msgContext = 0;
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
nseg = scsi_dma_map(pcmd);
@@ -1156,7 +1395,7 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
- uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
+ uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
atomic_inc(&acb->ccboutstandingcount);
ccb->startdone = ARCMSR_CCB_START;
@@ -1165,25 +1404,24 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
struct MessageUnit_A __iomem *reg = acb->pmuA;
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
- writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
+ writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
&reg->inbound_queueport);
- else {
- writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
- }
- }
+ else
+ writel(cdb_phyaddr, &reg->inbound_queueport);
break;
+ }
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
uint32_t ending_index, index = reg->postq_index;
ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
- writel(0, &reg->post_qbuffer[ending_index]);
+ reg->post_qbuffer[ending_index] = 0;
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
- writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
- &reg->post_qbuffer[index]);
+ reg->post_qbuffer[index] =
+ cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
} else {
- writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
+ reg->post_qbuffer[index] = cdb_phyaddr;
}
index++;
index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
@@ -1192,11 +1430,11 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
}
break;
case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
+ struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
uint32_t ccb_post_stamp, arc_cdb_size;
arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
- ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
+ ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
if (acb->cdb_phyaddr_hi32) {
writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
@@ -1204,62 +1442,102 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
}
}
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *pmu = acb->pmuD;
+ u16 index_stripped;
+ u16 postq_index, toggle;
+ unsigned long flags;
+ struct InBound_SRB *pinbound_srb;
+
+ spin_lock_irqsave(&acb->postq_lock, flags);
+ postq_index = pmu->postq_index;
+ pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
+ pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
+ pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
+ pinbound_srb->length = ccb->arc_cdb_size >> 2;
+ arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
+ toggle = postq_index & 0x4000;
+ index_stripped = postq_index + 1;
+ index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
+ pmu->postq_index = index_stripped ? (index_stripped | toggle) :
+ (toggle ^ 0x4000);
+ writel(postq_index, pmu->inboundlist_write_pointer);
+ spin_unlock_irqrestore(&acb->postq_lock, flags);
+ break;
+ }
}
}
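[Editor's note] In the Type D post path just above, the inbound queue index carries the same 0x4000 wrap toggle as the done queue, but wraps with a power-of-two mask instead of a modulo and restarts at slot 0. A minimal sketch of that update, not part of this patch; POSTQ_DEPTH is an assumed stand-in for ARCMSR_MAX_ARC1214_POSTQUEUE and is assumed to be a power of two:

#include <stdint.h>

#define POSTQ_DEPTH 0x100	/* assumed power of two */

/* Advance the inbound (post) queue index; bit 14 is the wrap toggle. */
static uint16_t next_postq_index(uint16_t postq_index)
{
	uint16_t toggle = postq_index & 0x4000;
	uint16_t stripped = (postq_index + 1) & (POSTQ_DEPTH - 1);

	/* On wrap, flip the toggle, matching (toggle ^ 0x4000) above. */
	return stripped ? (stripped | toggle) : (toggle ^ 0x4000);
}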
-static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
- if (!arcmsr_hba_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
+		"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
-static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
+		"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
-static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
{
- struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
+		"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, pACB->host->host_no);
}
return;
}
+
+static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *reg = pACB->pmuD;
+
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(pACB))
+	pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
+ "timeout\n", pACB->host->host_no);
+}
+
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_stop_hba_bgrb(acb);
+ arcmsr_hbaA_stop_bgrb(acb);
}
break;
case ACB_ADAPTER_TYPE_B: {
- arcmsr_stop_hbb_bgrb(acb);
+ arcmsr_hbaB_stop_bgrb(acb);
}
break;
case ACB_ADAPTER_TYPE_C: {
- arcmsr_stop_hbc_bgrb(acb);
+ arcmsr_hbaC_stop_bgrb(acb);
}
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_stop_bgrb(acb);
+ break;
}
}
@@ -1268,7 +1546,7 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
-void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
+static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
@@ -1284,8 +1562,16 @@ void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
+
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
}
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+ reg->inbound_doorbell);
+ }
+ break;
}
}
@@ -1320,6 +1606,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
+ reg->inbound_doorbell);
+ }
+ break;
}
}
@@ -1340,9 +1632,15 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
+ struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
}
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -1364,96 +1662,208 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
}
break;
case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
- }
-
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
+ }
+ break;
}
return pqbuffer;
}
-static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
+static uint32_t
+arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
+ struct QBUFFER __iomem *prbuffer)
{
- struct QBUFFER __iomem *prbuffer;
- struct QBUFFER *pQbuffer;
- uint8_t __iomem *iop_data;
- int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
- rqbuf_lastindex = acb->rqbuf_lastindex;
- rqbuf_firstindex = acb->rqbuf_firstindex;
- prbuffer = arcmsr_get_iop_rqbuffer(acb);
- iop_data = (uint8_t __iomem *)prbuffer->data;
- iop_len = prbuffer->data_len;
- my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
-
- if (my_empty_len >= iop_len)
- {
- while (iop_len > 0) {
- pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
- memcpy(pQbuffer, iop_data, 1);
- rqbuf_lastindex++;
- rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
+ uint8_t *pQbuffer;
+ uint8_t *buf1 = NULL;
+ uint32_t __iomem *iop_data;
+ uint32_t iop_len, data_len, *buf2 = NULL;
+
+ iop_data = (uint32_t __iomem *)prbuffer->data;
+ iop_len = readl(&prbuffer->data_len);
+ if (iop_len > 0) {
+ buf1 = kmalloc(128, GFP_ATOMIC);
+ buf2 = (uint32_t *)buf1;
+ if (buf1 == NULL)
+ return 0;
+ data_len = iop_len;
+ while (data_len >= 4) {
+ *buf2++ = readl(iop_data);
iop_data++;
- iop_len--;
+ data_len -= 4;
}
- acb->rqbuf_lastindex = rqbuf_lastindex;
- arcmsr_iop_message_read(acb);
+ if (data_len)
+ *buf2 = readl(iop_data);
+ buf2 = (uint32_t *)buf1;
+ }
+ while (iop_len > 0) {
+ pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
+ *pQbuffer = *buf1;
+ acb->rqbuf_putIndex++;
+ /* if last, index number set it to 0 */
+ acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ buf1++;
+ iop_len--;
}
+ kfree(buf2);
+ /* let IOP know data has been read */
+ arcmsr_iop_message_read(acb);
+ return 1;
+}
+
+uint32_t
+arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
+ struct QBUFFER __iomem *prbuffer) {
- else {
+ uint8_t *pQbuffer;
+ uint8_t __iomem *iop_data;
+ uint32_t iop_len;
+
+ if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+ return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
+ iop_data = (uint8_t __iomem *)prbuffer->data;
+ iop_len = readl(&prbuffer->data_len);
+ while (iop_len > 0) {
+ pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
+ *pQbuffer = readb(iop_data);
+ acb->rqbuf_putIndex++;
+ acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ iop_data++;
+ iop_len--;
+ }
+ arcmsr_iop_message_read(acb);
+ return 1;
+}
+
+static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
+{
+ unsigned long flags;
+ struct QBUFFER __iomem *prbuffer;
+ int32_t buf_empty_len;
+
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ prbuffer = arcmsr_get_iop_rqbuffer(acb);
+ buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
+ (ARCMSR_MAX_QBUFFER - 1);
+ if (buf_empty_len >= readl(&prbuffer->data_len)) {
+ if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+ acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+ } else
acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+}
+
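[Editor's note] The rewritten ioctl-buffer path above replaces the old firstindex/lastindex names with get/put indices on a byte ring and gates copies on a power-of-two mask. A small self-contained sketch of that ring bookkeeping, not part of this patch; QBUF_SIZE and the helper names are illustrative stand-ins for ARCMSR_MAX_QBUFFER and the driver's loops:

#include <stdint.h>

#define QBUF_SIZE 4096	/* assumed power of two, stand-in for ARCMSR_MAX_QBUFFER */

struct byte_ring {
	uint8_t buf[QBUF_SIZE];
	uint32_t put;	/* producer index (rqbuf_putIndex / wqbuf_putIndex) */
	uint32_t get;	/* consumer index (rqbuf_getIndex / wqbuf_getIndex) */
};

/* Number of bytes currently queued. */
static uint32_t ring_count(const struct byte_ring *r)
{
	return (r->put - r->get) & (QBUF_SIZE - 1);
}

/* Byte-at-a-time insert with modulo wrap, as in the rqbuf_putIndex loops. */
static void ring_put(struct byte_ring *r, const uint8_t *src, uint32_t len)
{
	while (len--) {
		r->buf[r->put] = *src++;
		r->put = (r->put + 1) % QBUF_SIZE;
	}
}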
+static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
+{
+ uint8_t *pQbuffer;
+ struct QBUFFER __iomem *pwbuffer;
+ uint8_t *buf1 = NULL;
+ uint32_t __iomem *iop_data;
+ uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
+
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
+ buf1 = kmalloc(128, GFP_ATOMIC);
+ buf2 = (uint32_t *)buf1;
+ if (buf1 == NULL)
+ return;
+
+ acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
+ pwbuffer = arcmsr_get_iop_wqbuffer(acb);
+ iop_data = (uint32_t __iomem *)pwbuffer->data;
+ while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+ && (allxfer_len < 124)) {
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
+ *buf1 = *pQbuffer;
+ acb->wqbuf_getIndex++;
+ acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
+ buf1++;
+ allxfer_len++;
+ }
+ data_len = allxfer_len;
+ buf1 = (uint8_t *)buf2;
+ while (data_len >= 4) {
+ data = *buf2++;
+ writel(data, iop_data);
+ iop_data++;
+ data_len -= 4;
+ }
+ if (data_len) {
+ data = *buf2;
+ writel(data, iop_data);
+ }
+ writel(allxfer_len, &pwbuffer->data_len);
+ kfree(buf1);
+ arcmsr_iop_message_wrote(acb);
}
}
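[Editor's note] arcmsr_write_ioctldata2iop_in_DWORD above gathers the outgoing bytes into a kmalloc'd bounce buffer and then pushes them to the adapter with 32-bit writes only. A rough userspace-style sketch of that packing step, not part of this patch; write32() is a hypothetical stand-in for writel() on the mapped queue buffer, and this sketch zero-pads the final partial word:

#include <stdint.h>
#include <string.h>

/* Hypothetical MMIO accessor for the sketch only. */
static void write32(uint32_t val, volatile uint32_t *reg)
{
	*reg = val;
}

/* Copy "len" bytes to a 32-bit-only window in 4-byte chunks, rounding the
 * tail up to a full word, in the spirit of the DWORD write helper above. */
static void copy_to_dword_window(volatile uint32_t *dst,
				 const uint8_t *src, uint32_t len)
{
	uint32_t word;

	while (len >= 4) {
		memcpy(&word, src, 4);
		write32(word, dst++);
		src += 4;
		len -= 4;
	}
	if (len) {
		word = 0;
		memcpy(&word, src, len);
		write32(word, dst);
	}
}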
-static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
+void
+arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
{
- acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
- if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
- uint8_t *pQbuffer;
- struct QBUFFER __iomem *pwbuffer;
- uint8_t __iomem *iop_data;
- int32_t allxfer_len = 0;
+ uint8_t *pQbuffer;
+ struct QBUFFER __iomem *pwbuffer;
+ uint8_t __iomem *iop_data;
+ int32_t allxfer_len = 0;
+ if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+ arcmsr_write_ioctldata2iop_in_DWORD(acb);
+ return;
+ }
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
iop_data = (uint8_t __iomem *)pwbuffer->data;
-
- while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
- (allxfer_len < 124)) {
- pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
- memcpy(iop_data, pQbuffer, 1);
- acb->wqbuf_firstindex++;
- acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
+ while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+ && (allxfer_len < 124)) {
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
+ writeb(*pQbuffer, iop_data);
+ acb->wqbuf_getIndex++;
+ acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
iop_data++;
allxfer_len++;
}
- pwbuffer->data_len = allxfer_len;
-
+ writel(allxfer_len, &pwbuffer->data_len);
arcmsr_iop_message_wrote(acb);
}
+}
- if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
+static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
+ if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+ arcmsr_write_ioctldata2iop(acb);
+ if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
- }
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
}
-static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_doorbell;
struct MessageUnit_A __iomem *reg = acb->pmuA;
outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell);
- if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
- arcmsr_iop2drv_data_wrote_handle(acb);
- }
-
- if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
- arcmsr_iop2drv_data_read_handle(acb);
- }
+ do {
+ writel(outbound_doorbell, &reg->outbound_doorbell);
+ if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(acb);
+ if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(acb);
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
+ | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
}
-static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
{
uint32_t outbound_doorbell;
- struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
/*
*******************************************************************
** Maybe here we need to check wrqbuffer_lock is lock or not
@@ -1462,19 +1872,42 @@ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
*******************************************************************
*/
outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
- if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
- arcmsr_iop2drv_data_wrote_handle(pACB);
- }
- if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
- arcmsr_iop2drv_data_read_handle(pACB);
- }
- if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
- arcmsr_hbc_message_isr(pACB); /* messenger of "driver to iop commands" */
- }
- return;
+ do {
+ writel(outbound_doorbell, &reg->outbound_doorbell_clear);
+ readl(&reg->outbound_doorbell_clear);
+ if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(pACB);
+ if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
+ arcmsr_hbaC_message_isr(pACB);
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
+ | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
}
-static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
+
+static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell;
+ struct MessageUnit_D *pmu = pACB->pmuD;
+
+ outbound_doorbell = readl(pmu->outbound_doorbell);
+ do {
+ writel(outbound_doorbell, pmu->outbound_doorbell);
+ if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
+ arcmsr_hbaD_message_isr(pACB);
+ if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(pACB);
+ outbound_doorbell = readl(pmu->outbound_doorbell);
+ } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
+ | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
+}
+
+static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t flag_ccb;
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -1488,7 +1921,7 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
arcmsr_drain_donequeue(acb, pCCB, error);
}
}
-static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t index;
uint32_t flag_ccb;
@@ -1497,8 +1930,8 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
struct CommandControlBlock *pCCB;
bool error;
index = reg->doneq_index;
- while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
- writel(0, &reg->done_qbuffer[index]);
+ while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
+ reg->done_qbuffer[index] = 0;
pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1509,35 +1942,80 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
}
}
-static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
{
- struct MessageUnit_C *phbcmu;
+ struct MessageUnit_C __iomem *phbcmu;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *ccb;
uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
int error;
- phbcmu = (struct MessageUnit_C *)acb->pmuC;
+ phbcmu = acb->pmuC;
/* areca cdb command done */
/* Use correct offset and size for syncing */
- while (readl(&phbcmu->host_int_status) &
- ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
- /* check if command done with no error*/
- flag_ccb = readl(&phbcmu->outbound_queueport_low);
- ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
- arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
- ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
- error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
- /* check if command done with no error */
- arcmsr_drain_donequeue(acb, ccb, error);
- if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
- writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
- break;
- }
- throttling++;
+ while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
+ 0xFFFFFFFF) {
+ ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+ + ccb_cdb_phy);
+ ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
+ arcmsr_cdb);
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+ ? true : false;
+ /* check if command done with no error */
+ arcmsr_drain_donequeue(acb, ccb, error);
+ throttling++;
+ if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
+ writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
+ &phbcmu->inbound_doorbell);
+ throttling = 0;
+ }
}
}
+
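[Editor's note] The reworked Type C post-queue ISR above drains the queue until the outbound port returns 0xFFFFFFFF and resets its throttle counter so the firmware gets poked periodically instead of the loop breaking out. A tiny self-contained sketch of that throttling idea, not part of this patch; all names and the THROTTLE_LEVEL value are made up for illustration:

#include <stdint.h>

#define THROTTLE_LEVEL 12	/* assumed stand-in for ARCMSR_HBC_ISR_THROTTLING_LEVEL */

/* Fake completion source so the sketch stands alone; 0xFFFFFFFF = empty. */
static uint32_t completions[] = { 0x10, 0x20, 0x30, 0xFFFFFFFF };
static unsigned int pos;

static uint32_t pop_completion(void)	{ return completions[pos++]; }
static void kick_firmware(void)		{ /* e.g. ring the inbound doorbell */ }
static void handle(uint32_t flag)	{ (void)flag; }

/* Drain everything, poking the firmware every THROTTLE_LEVEL completions. */
static void drain_postqueue(void)
{
	uint32_t flag, throttling = 0;

	while ((flag = pop_completion()) != 0xFFFFFFFFu) {
		handle(flag);
		if (++throttling == THROTTLE_LEVEL) {
			kick_firmware();
			throttling = 0;
		}
	}
}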
+static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
+ uint32_t addressLow, ccb_cdb_phy;
+ int error;
+ struct MessageUnit_D *pmu;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ pmu = acb->pmuD;
+ outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+ doneq_index = pmu->doneq_index;
+ if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
+ do {
+ toggle = doneq_index & 0x4000;
+ index_stripped = (doneq_index & 0xFFF) + 1;
+ index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+ pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+ ((toggle ^ 0x4000) + 1);
+ doneq_index = pmu->doneq_index;
+ addressLow = pmu->done_qbuffer[doneq_index &
+ 0xFFF].addressLow;
+ ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+ + ccb_cdb_phy);
+ ccb = container_of(arcmsr_cdb,
+ struct CommandControlBlock, arcmsr_cdb);
+ error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+ ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ writel(doneq_index, pmu->outboundlist_read_pointer);
+ } while ((doneq_index & 0xFFF) !=
+ (outbound_write_pointer & 0xFFF));
+ }
+ writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
+ pmu->outboundlist_interrupt_cause);
+ readl(pmu->outboundlist_interrupt_cause);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
/*
**********************************************************************************
** Handle a message interrupt
@@ -1546,14 +2024,14 @@ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
-static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
{
- struct MessageUnit_A *reg = acb->pmuA;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
/*clear interrupt and message state*/
writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
schedule_work(&acb->arcmsr_do_message_isr_bh);
}
-static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
@@ -1570,114 +2048,142 @@ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
-static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
{
- struct MessageUnit_C *reg = acb->pmuC;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
/*clear interrupt and message state*/
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
schedule_work(&acb->arcmsr_do_message_isr_bh);
}
-static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
+ readl(reg->outbound_doorbell);
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_intstatus;
struct MessageUnit_A __iomem *reg = acb->pmuA;
outbound_intstatus = readl(&reg->outbound_intstatus) &
acb->outbound_int_enable;
- if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
- return 1;
- }
- writel(outbound_intstatus, &reg->outbound_intstatus);
- if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
- arcmsr_hba_doorbell_isr(acb);
- }
- if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
- arcmsr_hba_postqueue_isr(acb);
- }
- if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
- /* messenger of "driver to iop commands" */
- arcmsr_hba_message_isr(acb);
- }
- return 0;
+ if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
+ return IRQ_NONE;
+ do {
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
+ arcmsr_hbaA_doorbell_isr(acb);
+ if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
+ arcmsr_hbaA_postqueue_isr(acb);
+ if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
+ arcmsr_hbaA_message_isr(acb);
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
+ | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
+ | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
+ return IRQ_HANDLED;
}
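[Editor's note] The rewritten Type A handler above, and the B/C/D handlers that follow, all take the same shape: return IRQ_NONE immediately if nothing is pending on the shared line, otherwise acknowledge and service events in a loop until the status register reads clean. A condensed sketch of that pattern, not part of this patch; read_status(), ack(), service() and PENDING_MASK are invented stand-ins stubbed out so the sketch compiles on its own:

#include <stdint.h>
#include <stdbool.h>

#define PENDING_MASK 0x0000000fu	/* assumed set of interesting status bits */

/* Fake hardware state so the sketch stands alone. */
static volatile uint32_t fake_status = 0x5;

static uint32_t read_status(void)	{ return fake_status; }
static void ack(uint32_t bits)		{ fake_status &= ~bits; }
static void service(uint32_t bits)	{ (void)bits; /* doorbell/postqueue/message work */ }

/* true  -> we owned the interrupt (IRQ_HANDLED)
 * false -> not ours on a shared line (IRQ_NONE) */
static bool drain_isr(void)
{
	uint32_t status = read_status() & PENDING_MASK;

	if (!status)
		return false;
	do {
		ack(status);
		service(status);
		status = read_status() & PENDING_MASK;
	} while (status);
	return true;
}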
-static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
+static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_doorbell;
struct MessageUnit_B *reg = acb->pmuB;
outbound_doorbell = readl(reg->iop2drv_doorbell) &
acb->outbound_int_enable;
if (!outbound_doorbell)
- return 1;
-
- writel(~outbound_doorbell, reg->iop2drv_doorbell);
- /*in case the last action of doorbell interrupt clearance is cached,
- this action can push HW to write down the clear bit*/
- readl(reg->iop2drv_doorbell);
- writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
- if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
- arcmsr_iop2drv_data_wrote_handle(acb);
- }
- if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
- arcmsr_iop2drv_data_read_handle(acb);
- }
- if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
- arcmsr_hbb_postqueue_isr(acb);
- }
- if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
- /* messenger of "driver to iop commands" */
- arcmsr_hbb_message_isr(acb);
- }
- return 0;
+ return IRQ_NONE;
+ do {
+ writel(~outbound_doorbell, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(acb);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(acb);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
+ arcmsr_hbaB_postqueue_isr(acb);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
+ arcmsr_hbaB_message_isr(acb);
+ outbound_doorbell = readl(reg->iop2drv_doorbell) &
+ acb->outbound_int_enable;
+ } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_IOP2DRV_DATA_READ_OK
+ | ARCMSR_IOP2DRV_CDB_DONE
+ | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
+ return IRQ_HANDLED;
}
-static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
+static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
uint32_t host_interrupt_status;
- struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+ struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
/*
*********************************************
** check outbound intstatus
*********************************************
*/
- host_interrupt_status = readl(&phbcmu->host_int_status);
- if (!host_interrupt_status) {
- /*it must be share irq*/
- return 1;
- }
- /* MU ioctl transfer doorbell interrupts*/
- if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
- arcmsr_hbc_doorbell_isr(pACB); /* messenger of "ioctl message read write" */
- }
- /* MU post queue interrupts*/
- if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
- arcmsr_hbc_postqueue_isr(pACB); /* messenger of "scsi commands" */
- }
- return 0;
+ host_interrupt_status = readl(&phbcmu->host_int_status) &
+ (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
+ arcmsr_hbaC_doorbell_isr(pACB);
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
+ arcmsr_hbaC_postqueue_isr(pACB);
+ host_interrupt_status = readl(&phbcmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
}
+
+static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
+{
+ u32 host_interrupt_status;
+ struct MessageUnit_D *pmu = pACB->pmuD;
+
+ host_interrupt_status = readl(pmu->host_int_status) &
+ (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU post queue interrupts*/
+ if (host_interrupt_status &
+ ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
+ arcmsr_hbaD_postqueue_isr(pACB);
+ if (host_interrupt_status &
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
+ arcmsr_hbaD_doorbell_isr(pACB);
+ host_interrupt_status = readl(pmu->host_int_status);
+ } while (host_interrupt_status &
+ (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
- if (arcmsr_handle_hba_isr(acb)) {
- return IRQ_NONE;
- }
- }
+ case ACB_ADAPTER_TYPE_A:
+ return arcmsr_hbaA_handle_isr(acb);
break;
-
- case ACB_ADAPTER_TYPE_B: {
- if (arcmsr_handle_hbb_isr(acb)) {
- return IRQ_NONE;
- }
- }
+ case ACB_ADAPTER_TYPE_B:
+ return arcmsr_hbaB_handle_isr(acb);
break;
- case ACB_ADAPTER_TYPE_C: {
- if (arcmsr_handle_hbc_isr(acb)) {
- return IRQ_NONE;
- }
- }
+ case ACB_ADAPTER_TYPE_C:
+ return arcmsr_hbaC_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_D:
+ return arcmsr_hbaD_handle_isr(acb);
+ default:
+ return IRQ_NONE;
}
- return IRQ_HANDLED;
}
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
@@ -1695,296 +2201,273 @@ static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
}
}
-void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
+
+void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
{
- int32_t wqbuf_firstindex, wqbuf_lastindex;
- uint8_t *pQbuffer;
- struct QBUFFER __iomem *pwbuffer;
- uint8_t __iomem *iop_data;
- int32_t allxfer_len = 0;
- pwbuffer = arcmsr_get_iop_wqbuffer(acb);
- iop_data = (uint8_t __iomem *)pwbuffer->data;
- if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
- acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
- wqbuf_firstindex = acb->wqbuf_firstindex;
- wqbuf_lastindex = acb->wqbuf_lastindex;
- while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
- pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
- memcpy(iop_data, pQbuffer, 1);
- wqbuf_firstindex++;
- wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
- iop_data++;
- allxfer_len++;
+ uint32_t i;
+
+ if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+ for (i = 0; i < 15; i++) {
+ if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+ acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ arcmsr_iop_message_read(acb);
+ mdelay(30);
+ } else if (acb->rqbuf_getIndex !=
+ acb->rqbuf_putIndex) {
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ mdelay(30);
+ } else
+ break;
}
- acb->wqbuf_firstindex = wqbuf_firstindex;
- pwbuffer->data_len = allxfer_len;
- arcmsr_iop_message_wrote(acb);
}
}
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
- struct scsi_cmnd *cmd)
+ struct scsi_cmnd *cmd)
{
- struct CMD_MESSAGE_FIELD *pcmdmessagefld;
- int retvalue = 0, transfer_len = 0;
char *buffer;
+ unsigned short use_sg;
+ int retvalue = 0, transfer_len = 0;
+ unsigned long flags;
+ struct CMD_MESSAGE_FIELD *pcmdmessagefld;
+ uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
+ (uint32_t)cmd->cmnd[6] << 16 |
+ (uint32_t)cmd->cmnd[7] << 8 |
+ (uint32_t)cmd->cmnd[8];
struct scatterlist *sg;
- uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
- (uint32_t ) cmd->cmnd[6] << 16 |
- (uint32_t ) cmd->cmnd[7] << 8 |
- (uint32_t ) cmd->cmnd[8];
- /* 4 bytes: Areca io control code */
+
+ use_sg = scsi_sg_count(cmd);
sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg_page(sg)) + sg->offset;
- if (scsi_sg_count(cmd) > 1) {
+ if (use_sg > 1) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
transfer_len += sg->length;
-
if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
retvalue = ARCMSR_MESSAGE_FAIL;
+ pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
goto message_out;
}
- pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
- switch(controlcode) {
-
+ pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
+ switch (controlcode) {
case ARCMSR_MESSAGE_READ_RQBUFFER: {
unsigned char *ver_addr;
- uint8_t *pQbuffer, *ptmpQbuffer;
- int32_t allxfer_len = 0;
-
- ver_addr = kmalloc(1032, GFP_ATOMIC);
+ uint8_t *ptmpQbuffer;
+ uint32_t allxfer_len = 0;
+ ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
+ pr_info("%s: memory not enough!\n", __func__);
goto message_out;
}
-
ptmpQbuffer = ver_addr;
- while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
- && (allxfer_len < 1031)) {
- pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
- memcpy(ptmpQbuffer, pQbuffer, 1);
- acb->rqbuf_firstindex++;
- acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
- ptmpQbuffer++;
- allxfer_len++;
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+ unsigned int tail = acb->rqbuf_getIndex;
+ unsigned int head = acb->rqbuf_putIndex;
+ unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
+ allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+ if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+ allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+ if (allxfer_len <= cnt_to_end)
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+ else {
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+ memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+ }
+ acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
}
+ memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
+ allxfer_len);
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-
struct QBUFFER __iomem *prbuffer;
- uint8_t __iomem *iop_data;
- int32_t iop_len;
-
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
- iop_data = prbuffer->data;
- iop_len = readl(&prbuffer->data_len);
- while (iop_len > 0) {
- acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
- acb->rqbuf_lastindex++;
- acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
- iop_data++;
- iop_len--;
- }
- arcmsr_iop_message_read(acb);
- }
- memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
- pcmdmessagefld->cmdmessage.Length = allxfer_len;
- if(acb->fw_flag == FW_DEADLOCK) {
- pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }else{
- pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
+ if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+ acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
}
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
kfree(ver_addr);
- }
+ pcmdmessagefld->cmdmessage.Length = allxfer_len;
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
break;
-
+ }
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
unsigned char *ver_addr;
- int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
+ int32_t user_len, cnt2end;
uint8_t *pQbuffer, *ptmpuserbuffer;
-
- ver_addr = kmalloc(1032, GFP_ATOMIC);
+ ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
- if(acb->fw_flag == FW_DEADLOCK) {
- pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }else{
- pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_OK;
- }
ptmpuserbuffer = ver_addr;
user_len = pcmdmessagefld->cmdmessage.Length;
- memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
- wqbuf_lastindex = acb->wqbuf_lastindex;
- wqbuf_firstindex = acb->wqbuf_firstindex;
- if (wqbuf_lastindex != wqbuf_firstindex) {
+ memcpy(ptmpuserbuffer,
+ pcmdmessagefld->messagedatabuffer, user_len);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
struct SENSE_DATA *sensebuffer =
(struct SENSE_DATA *)cmd->sense_buffer;
- arcmsr_post_ioctldata2iop(acb);
+ arcmsr_write_ioctldata2iop(acb);
/* has error report sensedata */
- sensebuffer->ErrorCode = 0x70;
+ sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->SenseKey = ILLEGAL_REQUEST;
sensebuffer->AdditionalSenseLength = 0x0A;
sensebuffer->AdditionalSenseCode = 0x20;
sensebuffer->Valid = 1;
retvalue = ARCMSR_MESSAGE_FAIL;
} else {
- my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
- &(ARCMSR_MAX_QBUFFER - 1);
- if (my_empty_len >= user_len) {
- while (user_len > 0) {
- pQbuffer =
- &acb->wqbuffer[acb->wqbuf_lastindex];
- memcpy(pQbuffer, ptmpuserbuffer, 1);
- acb->wqbuf_lastindex++;
- acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
- ptmpuserbuffer++;
- user_len--;
- }
- if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
- acb->acb_flags &=
- ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
- arcmsr_post_ioctldata2iop(acb);
- }
- } else {
- /* has error report sensedata */
- struct SENSE_DATA *sensebuffer =
- (struct SENSE_DATA *)cmd->sense_buffer;
- sensebuffer->ErrorCode = 0x70;
- sensebuffer->SenseKey = ILLEGAL_REQUEST;
- sensebuffer->AdditionalSenseLength = 0x0A;
- sensebuffer->AdditionalSenseCode = 0x20;
- sensebuffer->Valid = 1;
- retvalue = ARCMSR_MESSAGE_FAIL;
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+ cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+ if (user_len > cnt2end) {
+ memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+ ptmpuserbuffer += cnt2end;
+ user_len -= cnt2end;
+ acb->wqbuf_putIndex = 0;
+ pQbuffer = acb->wqbuffer;
}
+ memcpy(pQbuffer, ptmpuserbuffer, user_len);
+ acb->wqbuf_putIndex += user_len;
+ acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
+ acb->acb_flags &=
+ ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
+ arcmsr_write_ioctldata2iop(acb);
}
- kfree(ver_addr);
}
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ kfree(ver_addr);
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
break;
-
+ }
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
uint8_t *pQbuffer = acb->rqbuffer;
- if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
- acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
- arcmsr_iop_message_read(acb);
- }
+
+ arcmsr_clear_iop2drv_rqueue_buffer(acb);
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
- acb->rqbuf_firstindex = 0;
- acb->rqbuf_lastindex = 0;
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
- if(acb->fw_flag == FW_DEADLOCK) {
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }else{
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_OK;
- }
- }
+ ARCMSR_MESSAGE_RETURNCODE_OK;
break;
-
+ }
case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
uint8_t *pQbuffer = acb->wqbuffer;
- if(acb->fw_flag == FW_DEADLOCK) {
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+ ACB_F_MESSAGE_WQBUFFER_READED);
+ acb->wqbuf_getIndex = 0;
+ acb->wqbuf_putIndex = 0;
+ memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }else{
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_OK;
- }
-
- if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
- acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
- arcmsr_iop_message_read(acb);
- }
- acb->acb_flags |=
- (ACB_F_MESSAGE_WQBUFFER_CLEARED |
- ACB_F_MESSAGE_WQBUFFER_READED);
- acb->wqbuf_firstindex = 0;
- acb->wqbuf_lastindex = 0;
- memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
- }
+ ARCMSR_MESSAGE_RETURNCODE_OK;
break;
-
+ }
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
uint8_t *pQbuffer;
-
- if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
- acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
- arcmsr_iop_message_read(acb);
- }
- acb->acb_flags |=
- (ACB_F_MESSAGE_WQBUFFER_CLEARED
- | ACB_F_MESSAGE_RQBUFFER_CLEARED
- | ACB_F_MESSAGE_WQBUFFER_READED);
- acb->rqbuf_firstindex = 0;
- acb->rqbuf_lastindex = 0;
- acb->wqbuf_firstindex = 0;
- acb->wqbuf_lastindex = 0;
+ arcmsr_clear_iop2drv_rqueue_buffer(acb);
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
pQbuffer = acb->rqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+ ACB_F_MESSAGE_WQBUFFER_READED);
+ acb->wqbuf_getIndex = 0;
+ acb->wqbuf_putIndex = 0;
pQbuffer = acb->wqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
- if(acb->fw_flag == FW_DEADLOCK) {
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }else{
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_OK;
- }
- }
+ ARCMSR_MESSAGE_RETURNCODE_OK;
break;
-
+ }
case ARCMSR_MESSAGE_RETURN_CODE_3F: {
- if(acb->fw_flag == FW_DEADLOCK) {
+ if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }else{
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_3F;
- }
+ ARCMSR_MESSAGE_RETURNCODE_3F;
break;
- }
+ }
case ARCMSR_MESSAGE_SAY_HELLO: {
int8_t *hello_string = "Hello! I am ARCMSR";
- if(acb->fw_flag == FW_DEADLOCK) {
+ if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }else{
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_OK;
- }
- memcpy(pcmdmessagefld->messagedatabuffer, hello_string
- , (int16_t)strlen(hello_string));
- }
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ memcpy(pcmdmessagefld->messagedatabuffer,
+ hello_string, (int16_t)strlen(hello_string));
break;
-
- case ARCMSR_MESSAGE_SAY_GOODBYE:
- if(acb->fw_flag == FW_DEADLOCK) {
+ }
+ case ARCMSR_MESSAGE_SAY_GOODBYE: {
+ if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
arcmsr_iop_parking(acb);
break;
-
- case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
- if(acb->fw_flag == FW_DEADLOCK) {
+ }
+ case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
+ if (acb->fw_flag == FW_DEADLOCK)
pcmdmessagefld->cmdmessage.ReturnCode =
- ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
- }
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
arcmsr_flush_adapter_cache(acb);
break;
-
+ }
default:
retvalue = ARCMSR_MESSAGE_FAIL;
+ pr_info("%s: unknown controlcode!\n", __func__);
+ }
+message_out:
+ if (use_sg) {
+ struct scatterlist *sg = scsi_sglist(cmd);
+ kunmap_atomic(buffer - sg->offset);
}
- message_out:
- sg = scsi_sglist(cmd);
- kunmap_atomic(buffer - sg->offset);
return retvalue;
}
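[Editor's note] The READ_RQBUFFER branch in the function above linearizes the receive ring with CIRC_CNT()/CIRC_CNT_TO_END() and at most two memcpy() calls instead of the old per-byte loop, and the WRITE_WQBUFFER branch does the mirror-image split copy into the ring. A standalone sketch of the copy-out side, not part of this patch; the circ_buf macros are repeated here so the sketch is self-contained, and RING_SIZE is an assumed power-of-two stand-in for ARCMSR_MAX_QBUFFER:

#include <stdint.h>
#include <string.h>

#define RING_SIZE 4096	/* assumed power of two */

/* Mirrors the definitions in <linux/circ_buf.h>. */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
#define CIRC_CNT_TO_END(head, tail, size) \
	({ int end = (size) - (tail); \
	   int n = ((head) + end) & ((size) - 1); \
	   n < end ? n : end; })

/* Copy up to max_len queued bytes out of ring[] into dst, advancing *tail.
 * Returns the number of bytes copied. */
static uint32_t ring_copy_out(uint8_t *dst, uint32_t max_len,
			      const uint8_t *ring, uint32_t head, uint32_t *tail)
{
	uint32_t cnt = CIRC_CNT(head, *tail, RING_SIZE);
	uint32_t cnt_to_end = CIRC_CNT_TO_END(head, *tail, RING_SIZE);

	if (cnt > max_len)
		cnt = max_len;
	if (cnt <= cnt_to_end) {
		memcpy(dst, ring + *tail, cnt);			/* one straight run */
	} else {
		memcpy(dst, ring + *tail, cnt_to_end);		/* up to the end */
		memcpy(dst + cnt_to_end, ring, cnt - cnt_to_end);/* then the wrap */
	}
	*tail = (*tail + cnt) % RING_SIZE;
	return cnt;
}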
@@ -1999,7 +2482,7 @@ static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock
list_del_init(&ccb->list);
}else{
spin_unlock_irqrestore(&acb->ccblist_lock, flags);
- return 0;
+ return NULL;
}
spin_unlock_irqrestore(&acb->ccblist_lock, flags);
return ccb;
@@ -2079,9 +2562,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
arcmsr_handle_virtual_command(acb, cmd);
return 0;
}
- if (atomic_read(&acb->ccboutstandingcount) >=
- ARCMSR_MAX_OUTSTANDING_CMD)
- return SCSI_MLQUEUE_HOST_BUSY;
ccb = arcmsr_get_freeccb(acb);
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2096,7 +2576,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
char *acb_firm_model = acb->firm_model;
@@ -2107,7 +2587,7 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
int count;
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- if (!arcmsr_hba_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
return false;
@@ -2135,10 +2615,10 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
iop_device_map++;
count--;
}
- printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
acb->host->host_no,
- acb->firm_version,
- acb->firm_model);
+ acb->firm_model,
+ acb->firm_version);
acb->signature = readl(&reg->message_rwbuffer[0]);
acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
@@ -2147,7 +2627,7 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
return true;
}
-static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
struct pci_dev *pdev = acb->pdev;
@@ -2163,12 +2643,18 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
char __iomem *iop_device_map;
/*firm_version,21,84-99*/
int count;
- dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
+
+ acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle, GFP_KERNEL);
if (!dma_coherent){
- printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
+ printk(KERN_NOTICE
+ "arcmsr%d: dma_alloc_coherent got error for hbb mu\n",
+ acb->host->host_no);
return false;
}
- acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
reg = (struct MessageUnit_B *)dma_coherent;
acb->pmuB = reg;
reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
@@ -2183,7 +2669,7 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
return false;
@@ -2211,10 +2697,10 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
count--;
}
- printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
acb->host->host_no,
- acb->firm_version,
- acb->firm_model);
+ acb->firm_model,
+ acb->firm_version);
acb->signature = readl(&reg->message_rwbuffer[1]);
/*firm_signature,1,00-03*/
@@ -2231,14 +2717,14 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
return true;
}
-static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
+static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
uint32_t intmask_org, Index, firmware_state = 0;
- struct MessageUnit_C *reg = pACB->pmuC;
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
char *acb_firm_model = pACB->firm_model;
char *acb_firm_version = pACB->firm_version;
- char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
- char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
+ char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
+ char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
int count;
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
@@ -2277,10 +2763,10 @@ static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
iop_firm_version++;
count--;
}
- printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
pACB->host->host_no,
- pACB->firm_version,
- pACB->firm_model);
+ pACB->firm_model,
+ pACB->firm_version);
pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
@@ -2289,17 +2775,166 @@ static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
/*all interrupt service will be enable at arcmsr_iop_init*/
return true;
}
+
+static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
+{
+ char *acb_firm_model = acb->firm_model;
+ char *acb_firm_version = acb->firm_version;
+ char *acb_device_map = acb->device_map;
+ char __iomem *iop_firm_model;
+ char __iomem *iop_firm_version;
+ char __iomem *iop_device_map;
+ u32 count;
+ struct MessageUnit_D *reg;
+ void *dma_coherent2;
+ dma_addr_t dma_coherent_handle2;
+ struct pci_dev *pdev = acb->pdev;
+
+ acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
+ dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle2, GFP_KERNEL);
+ if (!dma_coherent2) {
+ pr_notice("DMA allocation failed...\n");
+ return false;
+ }
+ memset(dma_coherent2, 0, acb->roundup_ccbsize);
+ acb->dma_coherent_handle2 = dma_coherent_handle2;
+ acb->dma_coherent2 = dma_coherent2;
+ reg = (struct MessageUnit_D *)dma_coherent2;
+ acb->pmuD = reg;
+ reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID;
+ reg->cpu_mem_config = acb->mem_base0 +
+ ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION;
+ reg->i2o_host_interrupt_mask = acb->mem_base0 +
+ ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK;
+ reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET;
+ reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST;
+ reg->host_int_status = acb->mem_base0 +
+ ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS;
+ reg->pcief0_int_enable = acb->mem_base0 +
+ ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE;
+ reg->inbound_msgaddr0 = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_MESSAGE0;
+ reg->inbound_msgaddr1 = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_MESSAGE1;
+ reg->outbound_msgaddr0 = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_MESSAGE0;
+ reg->outbound_msgaddr1 = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_MESSAGE1;
+ reg->inbound_doorbell = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_DOORBELL;
+ reg->outbound_doorbell = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL;
+ reg->outbound_doorbell_enable = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE;
+ reg->inboundlist_base_low = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW;
+ reg->inboundlist_base_high = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH;
+ reg->inboundlist_write_pointer = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
+ reg->outboundlist_base_low = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW;
+ reg->outboundlist_base_high = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH;
+ reg->outboundlist_copy_pointer = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER;
+ reg->outboundlist_read_pointer = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER;
+ reg->outboundlist_interrupt_cause = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE;
+ reg->outboundlist_interrupt_enable = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE;
+ reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER;
+ reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER;
+ reg->msgcode_rwbuffer = acb->mem_base0 +
+ ARCMSR_ARC1214_MESSAGE_RWBUFFER;
+ iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
+ iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
+ iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ if (readl(acb->pmuD->outbound_doorbell) &
+ ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+ acb->pmuD->outbound_doorbell);/*clear interrupt*/
+ }
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+ /* wait message ready */
+ if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: wait get adapter firmware "
+ "miscellaneous data timeout\n", acb->host->host_no);
+ dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+ acb->dma_coherent2, acb->dma_coherent_handle2);
+ return false;
+ }
+ count = 8;
+ while (count) {
+ *acb_firm_model = readb(iop_firm_model);
+ acb_firm_model++;
+ iop_firm_model++;
+ count--;
+ }
+ count = 16;
+ while (count) {
+ *acb_firm_version = readb(iop_firm_version);
+ acb_firm_version++;
+ iop_firm_version++;
+ count--;
+ }
+ count = 16;
+ while (count) {
+ *acb_device_map = readb(iop_device_map);
+ acb_device_map++;
+ iop_device_map++;
+ count--;
+ }
+ acb->signature = readl(&reg->msgcode_rwbuffer[1]);
+ /*firm_signature,1,00-03*/
+ acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
+ /*firm_request_len,1,04-07*/
+ acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
+ /*firm_numbers_queue,2,08-11*/
+ acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
+ /*firm_sdram_size,3,12-15*/
+ acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
+ /*firm_hd_channels,4,16-19*/
+ acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
+ acb->host->host_no,
+ acb->firm_model,
+ acb->firm_version);
+ return true;
+}
+
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
- if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
- return arcmsr_get_hba_config(acb);
- else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
- return arcmsr_get_hbb_config(acb);
+ bool rtn = false;
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ rtn = arcmsr_hbaA_get_config(acb);
+ break;
+ case ACB_ADAPTER_TYPE_B:
+ rtn = arcmsr_hbaB_get_config(acb);
+ break;
+ case ACB_ADAPTER_TYPE_C:
+ rtn = arcmsr_hbaC_get_config(acb);
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ rtn = arcmsr_hbaD_get_config(acb);
+ break;
+ default:
+ break;
+ }
+ if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
+ acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
else
- return arcmsr_get_hbc_config(acb);
+ acb->maxOutstanding = acb->firm_numbers_queue - 1;
+ acb->host->can_queue = acb->maxOutstanding;
+ return rtn;
}
-static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
+static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -2328,7 +2963,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
}
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
- poll_ccb_done = (ccb == poll_ccb) ? 1:0;
+ poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
@@ -2355,7 +2990,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
return rtn;
}
-static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
+static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_B *reg = acb->pmuB;
@@ -2371,7 +3006,8 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
while(1){
index = reg->doneq_index;
- if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
+ flag_ccb = reg->done_qbuffer[index];
+ if (flag_ccb == 0) {
if (poll_ccb_done){
rtn = SUCCESS;
break;
@@ -2384,7 +3020,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
goto polling_hbb_ccb_retry;
}
}
- writel(0, &reg->done_qbuffer[index]);
+ reg->done_qbuffer[index] = 0;
index++;
/*if last index number set it to 0 */
index %= ARCMSR_MAX_HBB_POSTQUEUE;
@@ -2392,7 +3028,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
/* check if command done with no error*/
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
- poll_ccb_done = (ccb == poll_ccb) ? 1:0;
+ poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
@@ -2419,9 +3055,10 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
return rtn;
}
-static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
+static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
{
- struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
uint32_t flag_ccb, ccb_cdb_phy;
struct ARCMSR_CDB *arcmsr_cdb;
bool error;
@@ -2448,7 +3085,7 @@ polling_hbc_ccb_retry:
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
- poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
/* check ifcommand done with no error*/
if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
@@ -2475,6 +3112,81 @@ polling_hbc_ccb_retry:
}
return rtn;
}
+
+static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ bool error;
+ uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
+ int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
+ unsigned long flags;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ struct CommandControlBlock *pCCB;
+ struct MessageUnit_D *pmu = acb->pmuD;
+
+polling_hbaD_ccb_retry:
+ poll_count++;
+ while (1) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+ doneq_index = pmu->doneq_index;
+ if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 40) {
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbaD_ccb_retry;
+ }
+ }
+ toggle = doneq_index & 0x4000;
+ index_stripped = (doneq_index & 0xFFF) + 1;
+ index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+ pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+ ((toggle ^ 0x4000) + 1);
+ doneq_index = pmu->doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
+ ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
+ ccb_cdb_phy);
+ pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
+ arcmsr_cdb);
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ if ((pCCB->acb != acb) ||
+ (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ pr_notice("arcmsr%d: scsi id = %d "
+ "lun = %d ccb = '0x%p' poll command "
+ "abort successfully\n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ pr_notice("arcmsr%d: polling an illegal "
+ "ccb command done ccb = '0x%p' "
+ "ccboutstandingcount = %d\n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+ ? true : false;
+ arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ return rtn;
+}
+
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
@@ -2482,17 +3194,21 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
+ rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
}
break;
case ACB_ADAPTER_TYPE_B: {
- rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
+ rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
}
break;
case ACB_ADAPTER_TYPE_C: {
- rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
+ rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
}
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
+ break;
}
return rtn;
}
@@ -2500,6 +3216,7 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
+ dma_addr_t dma_coherent_handle;
/*
********************************************************************
@@ -2507,8 +3224,17 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
** if freeccb.HighPart is not zero
********************************************************************
*/
- cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
- cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_B:
+ case ACB_ADAPTER_TYPE_D:
+ dma_coherent_handle = acb->dma_coherent_handle2;
+ break;
+ default:
+ dma_coherent_handle = acb->dma_coherent_handle;
+ break;
+ }
+ cdb_phyaddr = lower_32_bits(dma_coherent_handle);
+ cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
/*
***********************************************************************
@@ -2520,65 +3246,62 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_A: {
if (cdb_phyaddr_hi32 != 0) {
struct MessageUnit_A __iomem *reg = acb->pmuA;
- uint32_t intmask_org;
- intmask_org = arcmsr_disable_outbound_ints(acb);
writel(ARCMSR_SIGNATURE_SET_CONFIG, \
&reg->message_rwbuffer[0]);
writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
&reg->inbound_msgaddr0);
- if (!arcmsr_hba_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
part physical address timeout\n",
acb->host->host_no);
return 1;
}
- arcmsr_enable_outbound_ints(acb, intmask_org);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
- unsigned long post_queue_phyaddr;
uint32_t __iomem *rwbuffer;
struct MessageUnit_B *reg = acb->pmuB;
- uint32_t intmask_org;
- intmask_org = arcmsr_disable_outbound_ints(acb);
reg->postq_index = 0;
reg->doneq_index = 0;
writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
acb->host->host_no);
return 1;
}
- post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
rwbuffer = reg->message_rwbuffer;
/* driver "set config" signature */
writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
/* normal should be zero */
writel(cdb_phyaddr_hi32, rwbuffer++);
/* postQ size (256 + 8)*4 */
- writel(post_queue_phyaddr, rwbuffer++);
+ writel(cdb_phyaddr, rwbuffer++);
/* doneQ size (256 + 8)*4 */
- writel(post_queue_phyaddr + 1056, rwbuffer++);
+ writel(cdb_phyaddr + 1056, rwbuffer++);
/* ccb maxQ size must be --> [(256 + 8)*4]*/
writel(1056, rwbuffer);
writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
timeout \n",acb->host->host_no);
return 1;
}
- arcmsr_hbb_enable_driver_mode(acb);
- arcmsr_enable_outbound_ints(acb, intmask_org);
+ writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ pr_err("arcmsr%d: can't set driver mode.\n",
+ acb->host->host_no);
+ return 1;
+ }
}
break;
case ACB_ADAPTER_TYPE_C: {
if (cdb_phyaddr_hi32 != 0) {
- struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
acb->adapter_index, cdb_phyaddr_hi32);
@@ -2586,13 +3309,34 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- if (!arcmsr_hbc_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
timeout \n", acb->host->host_no);
return 1;
}
}
}
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_D *reg = acb->pmuD;
+ reg->postq_index = 0;
+ reg->doneq_index = 0;
+ rwbuffer = reg->msgcode_rwbuffer;
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
+ writel(cdb_phyaddr_hi32, rwbuffer++);
+ writel(cdb_phyaddr, rwbuffer++);
+ writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
+ sizeof(struct InBound_SRB)), rwbuffer++);
+ writel(0x100, rwbuffer);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout\n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
@@ -2619,15 +3363,24 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
do {
firmware_state = readl(&reg->outbound_msgaddr1);
} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
}
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ do {
+ firmware_state = readl(reg->outbound_msgaddr1);
+ } while ((firmware_state &
+ ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
+ }
+ break;
}
}
-static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
@@ -2649,9 +3402,9 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
return;
}
-static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
{
- struct MessageUnit_B __iomem *reg = acb->pmuB;
+ struct MessageUnit_B *reg = acb->pmuB;
if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
return;
@@ -2671,7 +3424,7 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
return;
}
-static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
{
struct MessageUnit_C __iomem *reg = acb->pmuC;
if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
@@ -2694,69 +3447,119 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
return;
}
+static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
+ ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
+ ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ mod_timer(&acb->eternal_timer,
+ jiffies + msecs_to_jiffies(6 * HZ));
+ } else {
+ acb->fw_flag = FW_NORMAL;
+ if (atomic_read(&acb->ante_token_value) ==
+ atomic_read(&acb->rq_map_token)) {
+ atomic_set(&acb->rq_map_token, 16);
+ }
+ atomic_set(&acb->ante_token_value,
+ atomic_read(&acb->rq_map_token));
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ return;
+ }
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
+ reg->inbound_msgaddr0);
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ }
+}
+
static void arcmsr_request_device_map(unsigned long pacb)
{
struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_request_hba_device_map(acb);
+ arcmsr_hbaA_request_device_map(acb);
}
break;
case ACB_ADAPTER_TYPE_B: {
- arcmsr_request_hbb_device_map(acb);
+ arcmsr_hbaB_request_device_map(acb);
}
break;
case ACB_ADAPTER_TYPE_C: {
- arcmsr_request_hbc_device_map(acb);
+ arcmsr_hbaC_request_device_map(acb);
}
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_request_device_map(acb);
+ break;
}
}
-static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
acb->acb_flags |= ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
- if (!arcmsr_hba_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n", acb->host->host_no);
}
}
-static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
acb->acb_flags |= ACB_F_MSG_START_BGRB;
writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n",acb->host->host_no);
}
}
-static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
{
- struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+ struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
pACB->acb_flags |= ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
- if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n", pACB->host->host_no);
}
return;
}
+
+static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *pmu = pACB->pmuD;
+
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'start adapter "
+ "background rebulid' timeout\n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
- arcmsr_start_hba_bgrb(acb);
+ arcmsr_hbaA_start_bgrb(acb);
break;
case ACB_ADAPTER_TYPE_B:
- arcmsr_start_hbb_bgrb(acb);
+ arcmsr_hbaB_start_bgrb(acb);
break;
case ACB_ADAPTER_TYPE_C:
- arcmsr_start_hbc_bgrb(acb);
+ arcmsr_hbaC_start_bgrb(acb);
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_start_bgrb(acb);
+ break;
}
}
@@ -2783,13 +3586,48 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_C: {
- struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
- uint32_t outbound_doorbell;
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ uint32_t outbound_doorbell, i;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(&reg->outbound_doorbell);
writel(outbound_doorbell, &reg->outbound_doorbell_clear);
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ if (outbound_doorbell &
+ ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
+ writel(outbound_doorbell,
+ &reg->outbound_doorbell_clear);
+ writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
+ &reg->inbound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ uint32_t outbound_doorbell, i;
+ /* empty doorbell Qbuffer if door bell ringed */
+ outbound_doorbell = readl(reg->outbound_doorbell);
+ writel(outbound_doorbell, reg->outbound_doorbell);
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+ reg->inbound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(reg->outbound_doorbell);
+ if (outbound_doorbell &
+ ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
+ writel(outbound_doorbell,
+ reg->outbound_doorbell);
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+ reg->inbound_doorbell);
+ } else
+ break;
}
+ }
+ break;
}
}
@@ -2802,7 +3640,7 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
- if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
return;
}
@@ -2820,6 +3658,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
int i, count = 0;
struct MessageUnit_A __iomem *pmuA = acb->pmuA;
struct MessageUnit_C __iomem *pmuC = acb->pmuC;
+ struct MessageUnit_D *pmuD = acb->pmuD;
/* backup pci config data */
printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
@@ -2840,6 +3679,8 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
writel(0xD, &pmuC->write_sequence);
} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+ } else if ((acb->dev_id == 0x1214)) {
+ writel(0x20, pmuD->reset_request);
} else {
pci_write_config_byte(acb->pdev, 0x84, 0x20);
}
@@ -3016,9 +3857,7 @@ sleep:
arcmsr_get_firmware_spec(acb);
arcmsr_start_adapter_bgrb(acb);
/* clear Qbuffer if door bell ringed */
- outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */
- writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
+ arcmsr_clear_doorbell_queue_buffer(acb);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
atomic_set(&acb->rq_map_token, 16);
@@ -3038,6 +3877,66 @@ sleep:
}
break;
}
+ case ACB_ADAPTER_TYPE_D: {
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ pr_notice("arcmsr: there is an bus reset"
+ " eh proceeding.......\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags
+ & ACB_F_BUS_RESET) == 0, 220 * HZ);
+ if (timeout)
+ return SUCCESS;
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ struct MessageUnit_D *reg;
+ reg = acb->pmuD;
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+ nap:
+ ssleep(ARCMSR_SLEEPTIME);
+ if ((readl(reg->sample_at_reset) & 0x80) != 0) {
+ pr_err("arcmsr%d: waiting for "
+ "hw bus reset return, retry=%d\n",
+ acb->host->host_no, retry_count);
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ pr_err("arcmsr%d: waiting for hw bus"
+ " reset return, "
+ "RETRY TERMINATED!!\n",
+ acb->host->host_no);
+ return FAILED;
+ }
+ retry_count++;
+ goto nap;
+ }
+ acb->acb_flags |= ACB_F_IOP_INITED;
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_get_firmware_spec(acb);
+ arcmsr_start_adapter_bgrb(acb);
+ arcmsr_clear_doorbell_queue_buffer(acb);
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer,
+ jiffies + msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ pr_err("arcmsr: scsi bus reset "
+ "eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer,
+ jiffies + msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
+ }
+ break;
+ }
}
return rtn;
}
@@ -3056,8 +3955,10 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
(struct AdapterControlBlock *)cmd->device->host->hostdata;
int i = 0;
int rtn = FAILED;
+ uint32_t intmask_org;
+
printk(KERN_NOTICE
- "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
+ "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
acb->acb_flags |= ACB_F_ABORT;
acb->num_aborts++;
@@ -3067,9 +3968,12 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
** we need to handle it as soon as possible and exit
************************************************
*/
- if (!atomic_read(&acb->ccboutstandingcount))
+ if (!atomic_read(&acb->ccboutstandingcount)) {
+ acb->acb_flags &= ~ACB_F_ABORT;
return rtn;
+ }
+ intmask_org = arcmsr_disable_outbound_ints(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
@@ -3079,6 +3983,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
}
}
acb->acb_flags &= ~ACB_F_ABORT;
+ arcmsr_enable_outbound_ints(acb, intmask_org);
return rtn;
}
@@ -3108,19 +4013,20 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1280:
type = "SATA";
break;
+ case PCI_DEVICE_ID_ARECA_1214:
case PCI_DEVICE_ID_ARECA_1380:
case PCI_DEVICE_ID_ARECA_1381:
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
case PCI_DEVICE_ID_ARECA_1880:
- type = "SAS";
+ type = "SAS/SATA";
break;
default:
- type = "X-TYPE";
+ type = "unknown";
+ raid6 = 0;
break;
}
- sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
- type, raid6 ? "( RAID6 capable)" : "",
- ARCMSR_DRIVER_VERSION);
+ sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
+ type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
return buf;
}
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 860f527d8f26..81e83a65a193 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 1432ed5e9fc6..80d97f3d2ed9 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -275,6 +275,19 @@ bool is_link_state_evt(u32 trailer)
ASYNC_EVENT_CODE_LINK_STATE);
}
+static bool is_iscsi_evt(u32 trailer)
+{
+ return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_ISCSI;
+}
+
+static int iscsi_evt_type(u32 trailer)
+{
+ return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+ ASYNC_TRAILER_EVENT_TYPE_MASK;
+}
+
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
if (compl->flags != 0) {
@@ -438,7 +451,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
(evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
- phba->state = BE_ADAPTER_LINK_UP;
+ phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -461,7 +474,28 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
/* Interpret compl as a async link evt */
beiscsi_async_link_state_process(phba,
(struct be_async_event_link_state *) compl);
- else
+ else if (is_iscsi_evt(compl->flags)) {
+ switch (iscsi_evt_type(compl->flags)) {
+ case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
+ case ASYNC_EVENT_NEW_ISCSI_CONN:
+ case ASYNC_EVENT_NEW_TCP_CONN:
+ phba->state |= BE_ADAPTER_CHECK_BOOT;
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG |
+ BEISCSI_LOG_MBOX,
+ "BC_%d : Async iscsi Event,"
+ " flags handled = 0x%08x\n",
+ compl->flags);
+ break;
+ default:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG |
+ BEISCSI_LOG_MBOX,
+ "BC_%d : Unsupported Async"
+ " Event, flags = 0x%08x\n",
+ compl->flags);
+ }
+ } else
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG |
BEISCSI_LOG_MBOX,
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index cc7405c0eca0..98897434bcb4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -26,9 +26,9 @@
* The commands are serviced by the ARM processor in the OneConnect's MPU.
*/
struct be_sge {
- u32 pa_lo;
- u32 pa_hi;
- u32 len;
+ __le32 pa_lo;
+ __le32 pa_hi;
+ __le32 len;
};
#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
@@ -118,6 +118,14 @@ struct be_mcc_compl {
#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+#define ASYNC_EVENT_CODE_ISCSI 0x4
+
+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF
+#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4
+#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5
+#define ASYNC_EVENT_NEW_TCP_CONN 0x7
+
struct be_async_event_trailer {
u32 code;
};
@@ -624,11 +632,11 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
/******************** Modify EQ Delay *******************/
struct be_cmd_req_modify_eq_delay {
struct be_cmd_req_hdr hdr;
- u32 num_eq;
+ __le32 num_eq;
struct {
- u32 eq_id;
- u32 phase;
- u32 delay_multiplier;
+ __le32 eq_id;
+ __le32 phase;
+ __le32 delay_multiplier;
} delay[MAX_CPUS];
} __packed;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 86162811812d..b7391a3f9f0b 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -1274,6 +1274,31 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
}
/**
+ * beiscsi_flush_cq()- Flush the CQ created.
+ * @phba: ptr device priv structure.
+ *
+ * Before the connection resources are freed, flush
+ * all the CQ entries
+ **/
+static void beiscsi_flush_cq(struct beiscsi_hba *phba)
+{
+ uint16_t i;
+ struct be_eq_obj *pbe_eq;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ beiscsi_process_cq(pbe_eq);
+ blk_iopoll_enable(&pbe_eq->iopoll);
+ }
+}
+
+/**
* beiscsi_close_conn - Upload the connection
* @ep: The iscsi endpoint
* @flag: The type of connection closure
@@ -1294,6 +1319,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
}
ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+
+ /* Flush the CQ entries */
+ beiscsi_flush_cq(phba);
+
return ret;
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 31ddc8494398..e0b3b2d1f27a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 915c26b23ab6..30d74a06b993 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -2068,7 +2068,7 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
* return
* Number of Completion Entries processed.
**/
-static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
struct be_queue_info *cq;
struct sol_cqe *sol;
@@ -2110,6 +2110,18 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
cri_index = BE_GET_CRI_FROM_CID(cid);
ep = phba->ep_array[cri_index];
+
+ if (ep == NULL) {
+ /* connection has already been freed
+ * just move on to next one
+ */
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : proc cqe of disconn ep: cid %d\n",
+ cid);
+ goto proc_next_cqe;
+ }
+
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
@@ -2219,6 +2231,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
break;
}
+proc_next_cqe:
AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
queue_tail_inc(cq);
sol = queue_tail_node(cq);
@@ -4377,6 +4390,10 @@ static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
{
struct iscsi_boot_kobj *boot_kobj;
+ /* it has been created previously */
+ if (phba->boot_kset)
+ return 0;
+
/* get boot info using mgmt cmd */
if (beiscsi_get_boot_info(phba))
/* Try to see if we can carry on without this */
@@ -5206,6 +5223,7 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
free_irq(phba->pcidev->irq, phba);
}
pci_disable_msix(phba->pcidev);
+ cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
@@ -5227,7 +5245,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
hwi_cleanup(phba);
}
- cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
}
static void beiscsi_remove(struct pci_dev *pcidev)
@@ -5276,9 +5293,9 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
for (i = 0; i <= phba->num_cpus; i++)
phba->msix_entries[i].entry = i;
- status = pci_enable_msix(phba->pcidev, phba->msix_entries,
- (phba->num_cpus + 1));
- if (!status)
+ status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
+ phba->num_cpus + 1, phba->num_cpus + 1);
+ if (status > 0)
phba->msix_enabled = true;
return;
@@ -5335,6 +5352,14 @@ static void be_eqd_update(struct beiscsi_hba *phba)
}
}
+static void be_check_boot_session(struct beiscsi_hba *phba)
+{
+ if (beiscsi_setup_boot_info(phba))
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Could not set up "
+ "iSCSI boot info on async event.\n");
+}
+
/*
* beiscsi_hw_health_check()- Check adapter health
* @work: work item to check HW health
@@ -5350,6 +5375,11 @@ beiscsi_hw_health_check(struct work_struct *work)
be_eqd_update(phba);
+ if (phba->state & BE_ADAPTER_CHECK_BOOT) {
+ phba->state &= ~BE_ADAPTER_CHECK_BOOT;
+ be_check_boot_session(phba);
+ }
+
beiscsi_ue_detect(phba);
schedule_delayed_work(&phba->beiscsi_hw_check_task,
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 9ceab426eec9..7ee0ffc38514 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.2.273.0"
+#define BUILD_STR "10.4.114.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -104,6 +104,7 @@
#define BE_ADAPTER_LINK_DOWN 0x002
#define BE_ADAPTER_PCI_ERR 0x004
#define BE_ADAPTER_STATE_SHUTDOWN 0x008
+#define BE_ADAPTER_CHECK_BOOT 0x010
#define BEISCSI_CLEAN_UNLOAD 0x01
@@ -839,6 +840,9 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int id, unsigned int num_processed,
unsigned char rearm, unsigned char event);
+
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq);
+
static inline bool beiscsi_error(struct beiscsi_hba *phba)
{
return phba->ue_detected || phba->fw_timeout;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 665afcb74a56..681d4e8f003a 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -943,17 +943,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
if (ip_action == IP_ACTION_ADD) {
memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
- ip_param->len);
+ sizeof(req->ip_params.ip_record.ip_addr.addr));
if (subnet_param)
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
- subnet_param->value, subnet_param->len);
+ subnet_param->value,
+ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
} else {
memcpy(req->ip_params.ip_record.ip_addr.addr,
- if_info->ip_addr.addr, ip_param->len);
+ if_info->ip_addr.addr,
+ sizeof(req->ip_params.ip_record.ip_addr.addr));
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
- if_info->ip_addr.subnet_mask, ip_param->len);
+ if_info->ip_addr.subnet_mask,
+ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
}
rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
@@ -981,7 +984,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
req->action = gtway_action;
req->ip_addr.ip_type = BE2_IPV4;
- memcpy(req->ip_addr.addr, gt_addr, param_len);
+ memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 24a8fc577477..bd81446936fc 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4c5891e66038..0679782d9d15 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1654,6 +1654,10 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
u64 addr;
int i;
+ /*
+ * Use dma_map_sg directly to ensure we're using the correct
+ * dev struct off of pcidev.
+ */
sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
scsi_sg_count(sc), sc->sc_data_direction);
scsi_for_each_sg(sc, sg, sg_count, i) {
@@ -1703,9 +1707,16 @@ static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
- if (io_req->bd_tbl->bd_valid && sc) {
- scsi_dma_unmap(sc);
+ /*
+ * Use dma_unmap_sg directly to ensure we're using the correct
+ * dev struct off of pcidev.
+ */
+ if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
+ dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
io_req->bd_tbl->bd_valid = 0;
}
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 40e22497d249..7a36388822aa 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2235,6 +2235,9 @@ static umode_t bnx2i_attr_is_visible(int param_type, int param)
case ISCSI_PARAM_TGT_RESET_TMO:
case ISCSI_PARAM_IFACE_NAME:
case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_BOOT_ROOT:
+ case ISCSI_PARAM_BOOT_NIC:
+ case ISCSI_PARAM_BOOT_TARGET:
return S_IRUGO;
default:
return 0;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 49b1daa4476e..5db2d85195b1 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -94,7 +94,7 @@ enum {
};
struct csio_msix_entries {
- unsigned short vector; /* Vector assigned by pci_enable_msix */
+ unsigned short vector; /* Assigned MSI-X vector */
void *dev_id; /* Priv object associated w/ this msix*/
char desc[24]; /* Description of this vector */
};
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index 7ee9777ae2c5..a8c748a35f9c 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -499,7 +499,7 @@ csio_reduce_sqsets(struct csio_hw *hw, int cnt)
static int
csio_enable_msix(struct csio_hw *hw)
{
- int rv, i, j, k, n, min, cnt;
+ int i, j, k, n, min, cnt;
struct csio_msix_entries *entryp;
struct msix_entry *entries;
int extra = CSIO_EXTRA_VECS;
@@ -521,21 +521,15 @@ csio_enable_msix(struct csio_hw *hw)
csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
- while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
- cnt = rv;
- if (!rv) {
- if (cnt < (hw->num_sqsets + extra)) {
- csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
- csio_reduce_sqsets(hw, cnt - extra);
- }
- } else {
- if (rv > 0) {
- pci_disable_msix(hw->pdev);
- csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
- }
-
+ cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
+ if (cnt < 0) {
kfree(entries);
- return -ENOMEM;
+ return cnt;
+ }
+
+ if (cnt < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
+ csio_reduce_sqsets(hw, cnt - extra);
}
/* Save off vectors */
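The csiostor hunk above (like the hpsa and be2iscsi hunks later in this series) drops the old pci_enable_msix() retry loop in favour of a single pci_enable_msix_range() call. A minimal sketch of that calling convention follows; the function and variable names are hypothetical and not any driver's actual code:

	/* Sketch only: pci_enable_msix_range() asks for between @min and
	 * @want vectors in one call and returns the number granted, or a
	 * negative errno if even @min could not be allocated. */
	#include <linux/pci.h>
	#include <linux/printk.h>

	static int example_enable_msix(struct pci_dev *pdev,
				       struct msix_entry *entries,
				       int min, int want)
	{
		int got;

		got = pci_enable_msix_range(pdev, entries, min, want);
		if (got < 0)
			return got;	/* fall back to MSI or INTx */
		if (got < want)
			pr_debug("only %d of %d MSI-X vectors granted\n",
				 got, want);
		return got;		/* caller sizes its queues to 'got' */
	}

The point of the conversion is that the range call either succeeds with somewhere between min and want vectors or fails outright, so callers such as csio_enable_msix() no longer have to loop and retry with a smaller count.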
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 79788a12712d..02e69e7ee4a3 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1647,7 +1647,7 @@ static int cxgbi_inet6addr_handler(struct notifier_block *this,
if (event_dev->priv_flags & IFF_802_1Q_VLAN)
event_dev = vlan_dev_real_dev(event_dev);
- cdev = cxgbi_device_find_by_netdev(event_dev, NULL);
+ cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL);
if (!cdev)
return ret;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d65df6dc106f..6a2001d6b442 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
+static LIST_HEAD(cdev_rcu_list);
+static DEFINE_SPINLOCK(cdev_rcu_lock);
+
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
unsigned int max_conn)
{
@@ -142,6 +145,10 @@ struct cxgbi_device *cxgbi_device_register(unsigned int extra,
list_add_tail(&cdev->list_head, &cdev_list);
mutex_unlock(&cdev_mutex);
+ spin_lock(&cdev_rcu_lock);
+ list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
+ spin_unlock(&cdev_rcu_lock);
+
log_debug(1 << CXGBI_DBG_DEV,
"cdev 0x%p, p# %u.\n", cdev, nports);
return cdev;
@@ -153,9 +160,16 @@ void cxgbi_device_unregister(struct cxgbi_device *cdev)
log_debug(1 << CXGBI_DBG_DEV,
"cdev 0x%p, p# %u,%s.\n",
cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
+
mutex_lock(&cdev_mutex);
list_del(&cdev->list_head);
mutex_unlock(&cdev_mutex);
+
+ spin_lock(&cdev_rcu_lock);
+ list_del_rcu(&cdev->rcu_node);
+ spin_unlock(&cdev_rcu_lock);
+ synchronize_rcu();
+
cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
@@ -167,12 +181,9 @@ void cxgbi_device_unregister_all(unsigned int flag)
mutex_lock(&cdev_mutex);
list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
if ((cdev->flags & flag) == flag) {
- log_debug(1 << CXGBI_DBG_DEV,
- "cdev 0x%p, p# %u,%s.\n",
- cdev, cdev->nports, cdev->nports ?
- cdev->ports[0]->name : "");
- list_del(&cdev->list_head);
- cxgbi_device_destroy(cdev);
+ mutex_unlock(&cdev_mutex);
+ cxgbi_device_unregister(cdev);
+ mutex_lock(&cdev_mutex);
}
}
mutex_unlock(&cdev_mutex);
@@ -191,6 +202,7 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
}
}
mutex_unlock(&cdev_mutex);
+
log_debug(1 << CXGBI_DBG_DEV,
"lldev 0x%p, NO match found.\n", lldev);
return NULL;
@@ -230,6 +242,39 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
+ int *port)
+{
+ struct net_device *vdev = NULL;
+ struct cxgbi_device *cdev;
+ int i;
+
+ if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+ vdev = ndev;
+ ndev = vlan_dev_real_dev(ndev);
+ pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
+ for (i = 0; i < cdev->nports; i++) {
+ if (ndev == cdev->ports[i]) {
+ cdev->hbas[i]->vdev = vdev;
+ rcu_read_unlock();
+ if (port)
+ *port = i;
+ return cdev;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ log_debug(1 << CXGBI_DBG_DEV,
+ "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
+
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
int *port)
{
@@ -1807,7 +1852,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
u32 credits;
log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n",
+ "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
csk->rcv_wup, cdev->rx_credit_thres,
cdev->rcv_win);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index b3e6e7541cc5..1d98fad6a0ab 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -527,6 +527,7 @@ struct cxgbi_ports_map {
#define CXGBI_FLAG_IPV4_SET 0x10
struct cxgbi_device {
struct list_head list_head;
+ struct list_head rcu_node;
unsigned int flags;
struct net_device **ports;
void *lldev;
@@ -709,6 +710,8 @@ void cxgbi_device_unregister(struct cxgbi_device *);
void cxgbi_device_unregister_all(unsigned int flag);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
+ int *);
int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
struct scsi_host_template *,
struct scsi_transport_template *);
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 67283ef418ac..072f0ec2851e 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2363,6 +2363,7 @@ static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
host->unique_id = (u32)sys_tbl_pa + pHba->unit;
host->sg_tablesize = pHba->sg_tablesize;
host->can_queue = pHba->post_fifo_size;
+ host->use_cmd_list = 1;
return 0;
}
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 813dd5c998e4..943ad3a19661 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -837,7 +837,6 @@ struct hostdata {
static struct Scsi_Host *sh[MAX_BOARDS];
static const char *driver_name = "EATA";
static char sha[MAX_BOARDS];
-static DEFINE_SPINLOCK(driver_lock);
/* Initialize num_boards so that ihdlr can work while detect is in progress */
static unsigned int num_boards = MAX_BOARDS;
@@ -1097,8 +1096,6 @@ static int port_detect(unsigned long port_base, unsigned int j,
goto fail;
}
- spin_lock_irq(&driver_lock);
-
if (do_dma(port_base, 0, READ_CONFIG_PIO)) {
#if defined(DEBUG_DETECT)
printk("%s: detect, do_dma failed at 0x%03lx.\n", name,
@@ -1264,10 +1261,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
}
#endif
- spin_unlock_irq(&driver_lock);
sh[j] = shost = scsi_register(tpnt, sizeof(struct hostdata));
- spin_lock_irq(&driver_lock);
-
if (shost == NULL) {
printk("%s: unable to register host, detaching.\n", name);
goto freedma;
@@ -1344,8 +1338,6 @@ static int port_detect(unsigned long port_base, unsigned int j,
else
sprintf(dma_name, "DMA %u", dma_channel);
- spin_unlock_irq(&driver_lock);
-
for (i = 0; i < shost->can_queue; i++)
ha->cp[i].cp_dma_addr = pci_map_single(ha->pdev,
&ha->cp[i],
@@ -1438,7 +1430,6 @@ static int port_detect(unsigned long port_base, unsigned int j,
freeirq:
free_irq(irq, &sha[j]);
freelock:
- spin_unlock_irq(&driver_lock);
release_region(port_base, REGION_SIZE);
fail:
return 0;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 74277c20f6a5..bdc89899561a 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -96,14 +96,32 @@ int fcoe_link_speed_update(struct fc_lport *lport)
struct ethtool_cmd ecmd;
if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT |
+ FC_PORTSPEED_10GBIT |
+ FC_PORTSPEED_20GBIT |
+ FC_PORTSPEED_40GBIT);
+
if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full))
lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |=
- FC_PORTSPEED_10GBIT;
+
+ if (ecmd.supported & (SUPPORTED_10000baseT_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseR_FEC))
+ lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+ if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+
+ if (ecmd.supported & (SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
+
switch (ethtool_cmd_speed(&ecmd)) {
case SPEED_1000:
lport->link_speed = FC_PORTSPEED_1GBIT;
@@ -111,6 +129,15 @@ int fcoe_link_speed_update(struct fc_lport *lport)
case SPEED_10000:
lport->link_speed = FC_PORTSPEED_10GBIT;
break;
+ case 20000:
+ lport->link_speed = FC_PORTSPEED_20GBIT;
+ break;
+ case 40000:
+ lport->link_speed = FC_PORTSPEED_40GBIT;
+ break;
+ default:
+ lport->link_speed = FC_PORTSPEED_UNKNOWN;
+ break;
}
return 0;
}
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 1d3521e13d77..bf8d34c26f13 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.10"
+#define DRV_VERSION "1.6.0.11"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 2c613bdea78f..5980c10c734d 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -210,7 +210,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
else if (*trace_type == fc_trc_flag->fc_clear)
fnic_fc_trace_cleared = val;
else
- pr_err("fnic: cannot write to any debufs file\n");
+ pr_err("fnic: cannot write to any debugfs file\n");
(*ppos)++;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1b948f633fc5..f3984b48f8e9 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -35,7 +35,7 @@
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
-static u8 fcoe_all_fcfs[ETH_ALEN];
+static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
@@ -101,13 +101,14 @@ void fnic_handle_link(struct work_struct *work)
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
- } else
+ } else {
/* UP -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_fc_trace_set_data(
fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: UP_UP",
strlen("Link Status: UP_UP"));
+ }
}
} else if (fnic->link_status) {
/* DOWN -> UP */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index c77285926827..acf1f95cb5c5 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -592,7 +592,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
if (fnic_fc_trace_cleared == 1) {
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
- pr_info("fnic: Reseting the read idx\n");
+ pr_info("fnic: Resetting the read idx\n");
memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
fnic_fc_trace_max_pages * PAGE_SIZE);
fnic_fc_trace_cleared = 0;
@@ -743,7 +743,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
len += snprintf(fnic_dbgfs_prt->buffer + len,
- (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+ max_size - len,
fmt,
tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
tm.tm_hour, tm.tm_min, tm.tm_sec,
@@ -767,8 +767,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
j == ethhdr_len + fcoehdr_len + fchdr_len ||
(i > 3 && j%fchdr_len == 0)) {
len += snprintf(fnic_dbgfs_prt->buffer
- + len, (fnic_fc_trace_max_pages
- * PAGE_SIZE * 3) - len,
+ + len, max_size - len,
"\n\t\t\t\t\t\t\t\t");
i++;
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6b35d0dfe64c..cef5d49b59cd 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5971,10 +5971,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
/* Save the PCI command register */
pci_read_config_word(pdev, 4, &command_register);
- /* Turn the board off. This is so that later pci_restore_state()
- * won't turn the board on before the rest of config space is ready.
- */
- pci_disable_device(pdev);
pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
@@ -6022,11 +6018,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
goto unmap_cfgtable;
pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_warn(&pdev->dev, "failed to enable device.\n");
- goto unmap_cfgtable;
- }
pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
@@ -6159,26 +6150,22 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
h->msix_vector = MAX_REPLY_QUEUES;
if (h->msix_vector > num_online_cpus())
h->msix_vector = num_online_cpus();
- err = pci_enable_msix(h->pdev, hpsa_msix_entries,
- h->msix_vector);
- if (err > 0) {
+ err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
+ 1, h->msix_vector);
+ if (err < 0) {
+ dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
+ h->msix_vector = 0;
+ goto single_msi_mode;
+ } else if (err < h->msix_vector) {
dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
"available\n", err);
- h->msix_vector = err;
- err = pci_enable_msix(h->pdev, hpsa_msix_entries,
- h->msix_vector);
- }
- if (!err) {
- for (i = 0; i < h->msix_vector; i++)
- h->intr[i] = hpsa_msix_entries[i].vector;
- return;
- } else {
- dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
- err);
- h->msix_vector = 0;
- goto default_int_mode;
}
+ h->msix_vector = err;
+ for (i = 0; i < h->msix_vector; i++)
+ h->intr[i] = hpsa_msix_entries[i].vector;
+ return;
}
+single_msi_mode:
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
dev_info(&h->pdev->dev, "MSI\n");
if (!pci_enable_msi(h->pdev))
@@ -6541,6 +6528,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
if (!reset_devices)
return 0;
+	/* The kdump kernel is loading and we don't know what state the
+	 * PCI interface is in. dev->enable_cnt is zero, so we call
+	 * enable+disable, wait a while and switch it on.
+	 */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+ pci_disable_device(pdev);
+ msleep(260); /* a randomly chosen number */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ return -ENODEV;
+ }
+ pci_set_master(pdev);
/* Reset the controller with a PCI power-cycle or via doorbell */
rc = hpsa_kdump_hard_reset_controller(pdev);
@@ -6549,10 +6553,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
* "performant mode". Or, it might be 640x, which can't reset
* due to concerns about shared bbwc between 6402/6404 pair.
*/
- if (rc == -ENOTSUPP)
- return rc; /* just try to do the kdump anyhow. */
- if (rc)
- return -ENODEV;
+ if (rc) {
+ if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
+ rc = -ENODEV;
+ goto out_disable;
+ }
/* Now try to get the controller to respond to a no-op */
dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
@@ -6563,7 +6568,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
dev_warn(&pdev->dev, "no-op failed%s\n",
(i < 11 ? "; re-trying" : ""));
}
- return 0;
+
+out_disable:
+
+ pci_disable_device(pdev);
+ return rc;
}
static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
@@ -6743,6 +6752,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
iounmap(h->transtable);
if (h->cfgtable)
iounmap(h->cfgtable);
+ pci_disable_device(h->pdev);
pci_release_regions(h->pdev);
kfree(h);
}
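The hpsa MSI-X hunk replaces the retry loop around pci_enable_msix() with a single pci_enable_msix_range() call, which either grants between minvec and maxvec vectors or fails. A hedged sketch of that call pattern (MAX_VECS and the helper are placeholders, not hpsa code):

#include <linux/pci.h>

#define MAX_VECS 8	/* placeholder upper bound */

/* entries[i].entry must already hold the requested vector indices. */
static int enable_msix(struct pci_dev *pdev, struct msix_entry *entries)
{
	int nvec;

	nvec = pci_enable_msix_range(pdev, entries, 1, MAX_VECS);
	if (nvec < 0)
		return nvec;	/* caller falls back to MSI or INTx */

	return nvec;		/* number of vectors actually granted */
}

On success the return value may be anything from 1 to MAX_VECS, which is why the hunk stores it back into h->msix_vector instead of assuming the full request was honoured.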
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 924b0ba74dfe..2a9578c116b7 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2440,6 +2440,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
{
u32 ioasc;
int error_index;
+ struct ipr_hostrcb_type_21_error *error;
if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
return;
@@ -2464,6 +2465,15 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
if (!ipr_error_table[error_index].log_hcam)
return;
+ if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
+ hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
+ error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+ if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
+ ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
+ return;
+ }
+
ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
/* Set indication we have logged an error */
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 31ed126f7143..d0201ceb4aac 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -130,6 +130,7 @@
#define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500
#define IPR_IOASC_IOASC_MASK 0xFFFFFF00
#define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF
+#define IPR_IOASC_HW_CMD_FAILED 0x046E0000
#define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT 0x05240000
#define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000
#define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA 0x05258100
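The ipr hunk suppresses logging when the failed command carries an ILLEGAL_REQUEST sense key, which it digs out of byte 2 of the big-endian sense data word. A kernel-style sketch of that extraction (the helper name is illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Byte 2 of the first sense-data word holds the sense key;
 * ILLEGAL_REQUEST is SCSI sense key 0x05. */
static u8 sense_key_from_word(__be32 word)
{
	return (be32_to_cpu(word) & 0x0000ff00) >> 8;
}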
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index a669f2d11c31..427af0f24b0f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -726,13 +726,18 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
switch(param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_LOCAL_PORT:
spin_lock_bh(&conn->session->frwd_lock);
if (!tcp_sw_conn || !tcp_sw_conn->sock) {
spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
- rc = kernel_getpeername(tcp_sw_conn->sock,
- (struct sockaddr *)&addr, &len);
+ if (param == ISCSI_PARAM_LOCAL_PORT)
+ rc = kernel_getsockname(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ else
+ rc = kernel_getpeername(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
spin_unlock_bh(&conn->session->frwd_lock);
if (rc)
return rc;
@@ -895,6 +900,7 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
case ISCSI_PARAM_DATADGST_EN:
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_LOCAL_PORT:
case ISCSI_PARAM_EXP_STATSN:
case ISCSI_PARAM_PERSISTENT_ADDRESS:
case ISCSI_PARAM_PERSISTENT_PORT:
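The iscsi_tcp change reuses one sockaddr buffer and simply picks which end of the connection to query: kernel_getsockname() for the new ISCSI_PARAM_LOCAL_PORT, kernel_getpeername() otherwise. A minimal sketch following the call form shown in the hunk (helper and parameter names are illustrative):

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/types.h>

static int get_endpoint(struct socket *sock, bool local,
			struct sockaddr_storage *addr)
{
	int len = sizeof(*addr);

	if (local)
		return kernel_getsockname(sock, (struct sockaddr *)addr, &len);
	return kernel_getpeername(sock, (struct sockaddr *)addr, &len);
}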
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 8d65a51a7598..c11a638f32e6 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -296,9 +296,9 @@ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
BUG_ON(type >= FC_FC4_PROV_SIZE);
mutex_lock(&fc_prov_mutex);
if (prov->recv)
- rcu_assign_pointer(fc_passive_prov[type], NULL);
+ RCU_INIT_POINTER(fc_passive_prov[type], NULL);
else
- rcu_assign_pointer(fc_active_prov[type], NULL);
+ RCU_INIT_POINTER(fc_active_prov[type], NULL);
mutex_unlock(&fc_prov_mutex);
synchronize_rcu();
}
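The libfc change swaps rcu_assign_pointer() for RCU_INIT_POINTER() when storing NULL: the assign variant exists to order a new object's initialization before publishing it, and there is nothing to order when the new value is NULL. A self-contained sketch of the same teardown shape (names are illustrative, not libfc's):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct prov { int data; };
static struct prov __rcu *active_prov;

/* Caller is assumed to hold the registration mutex. */
static void prov_unregister(void)
{
	struct prov *old = rcu_dereference_protected(active_prov, 1);

	RCU_INIT_POINTER(active_prov, NULL);	/* no barrier needed for NULL */
	synchronize_rcu();			/* wait out readers of old */
	kfree(old);
}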
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 191b59793519..0d8bc6c66650 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3505,6 +3505,7 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
break;
case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_LOCAL_PORT:
if (sin)
len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
else
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 6eed9e76a166..2f9b96826ac0 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3385,7 +3385,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
return -EINVAL;
- strcpy(bucket_data, buf);
+ strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
str_ptr = &bucket_data[0];
/* Ignore this token - this is command token */
token = strsep(&str_ptr, "\t ");
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 5b5c825d9576..a7bf359aa0c6 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -656,7 +656,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
struct lpfc_nodelist *ndlp = rdata->pnode;
uint32_t elscmd;
uint32_t cmdsize;
- uint32_t rspsize;
struct lpfc_iocbq *cmdiocbq;
uint16_t rpi = 0;
struct bsg_job_data *dd_data;
@@ -687,7 +686,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
elscmd = job->request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
- rspsize = job->reply_payload.payload_len;
if (!lpfc_nlp_get(ndlp)) {
rc = -ENODEV;
@@ -2251,7 +2249,6 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
- rc = -ETIMEDOUT;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3140 Timeout waiting for link to "
"diagnostic mode_end, timeout:%d ms\n",
@@ -2291,7 +2288,6 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
LPFC_MBOXQ_t *pmboxq;
struct sli4_link_diag *link_diag_test_cmd;
uint32_t req_len, alloc_len;
- uint32_t timeout;
struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t shdr_status, shdr_add_status;
@@ -2342,7 +2338,6 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
link_diag_test_cmd = (struct sli4_link_diag *)
job->request->rqst_data.h_vendor.vendor_cmd;
- timeout = link_diag_test_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
@@ -2693,14 +2688,13 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
INIT_LIST_HEAD(&dmabuf->list);
/* now, allocate dma buffer */
- dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
- &(dmabuf->phys), GFP_KERNEL);
+ dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return NULL;
}
- memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
return dmabuf;
}
@@ -2828,8 +2822,10 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
size -= cnt;
}
- mlist->flag = i;
- return mlist;
+ if (mlist) {
+ mlist->flag = i;
+ return mlist;
+ }
out:
diag_cmd_data_free(phba, mlist);
return NULL;
@@ -3344,7 +3340,7 @@ job_error:
* will wake up thread waiting on the wait queue pointed by context1
* of the mailbox.
**/
-void
+static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
@@ -4593,7 +4589,7 @@ sli_cfg_ext_error:
* being reset) and complete the job, otherwise issue the mailbox command and
* let our completion handler finish the command.
**/
-static uint32_t
+static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_vport *vport)
{
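Several lpfc hunks in this series make the same substitution: dma_zalloc_coherent() (available in kernels of this vintage) returns zeroed memory, so the explicit memset() after dma_alloc_coherent() can go. A minimal sketch of the replacement (the helper name is illustrative):

#include <linux/dma-mapping.h>

static void *alloc_mbox_buf(struct device *dev, size_t size, dma_addr_t *phys)
{
	/* Equivalent to dma_alloc_coherent() followed by memset(virt, 0, size). */
	return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
}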
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index db5604f01a1a..00665a5d92fd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -451,7 +451,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
uint16_t, uint16_t, uint16_t);
uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
-void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index da61d8dc0449..61a32cd23f79 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1439,7 +1439,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
/* #2 HBA attribute entry */
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
- strcpy(ae->un.Manufacturer, "Emulex Corporation");
+ strncpy(ae->un.Manufacturer, "Emulex Corporation", 64);
len = strlen(ae->un.Manufacturer);
len += (len & 3) ? (4 - (len & 3)) : 4;
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1449,7 +1449,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
/* #3 HBA attribute entry */
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
- strcpy(ae->un.SerialNumber, phba->SerialNumber);
+ strncpy(ae->un.SerialNumber, phba->SerialNumber, 64);
len = strlen(ae->un.SerialNumber);
len += (len & 3) ? (4 - (len & 3)) : 4;
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1459,7 +1459,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
/* #4 HBA attribute entry */
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(MODEL);
- strcpy(ae->un.Model, phba->ModelName);
+ strncpy(ae->un.Model, phba->ModelName, 256);
len = strlen(ae->un.Model);
len += (len & 3) ? (4 - (len & 3)) : 4;
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1469,7 +1469,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
/* #5 HBA attribute entry */
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
- strcpy(ae->un.ModelDescription, phba->ModelDesc);
+ strncpy(ae->un.ModelDescription, phba->ModelDesc, 256);
len = strlen(ae->un.ModelDescription);
len += (len & 3) ? (4 - (len & 3)) : 4;
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1500,7 +1500,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
/* #7 HBA attribute entry */
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
- strcpy(ae->un.DriverVersion, lpfc_release_version);
+ strncpy(ae->un.DriverVersion,
+ lpfc_release_version, 256);
len = strlen(ae->un.DriverVersion);
len += (len & 3) ? (4 - (len & 3)) : 4;
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1510,7 +1511,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
/* #8 HBA attribute entry */
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
- strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
+ strncpy(ae->un.OptionROMVersion,
+ phba->OptionROMVersion, 256);
len = strlen(ae->un.OptionROMVersion);
len += (len & 3) ? (4 - (len & 3)) : 4;
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
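The lpfc_ct.c hunks bound the FDMI attribute copies to the size of the destination fields. One caveat worth noting: strncpy() does not NUL-terminate when the source is as long as the field, so the strlen() that follows each copy relies on the source strings being shorter. A minimal sketch (not the driver's structures):

#include <linux/string.h>

#define ATTR_FIELD_LEN 64	/* illustrative; the real fields are 64 or 256 bytes */

static void set_attr_field(char field[ATTR_FIELD_LEN], const char *src)
{
	strncpy(field, src, ATTR_FIELD_LEN);
}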
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b0aedce3f54b..786a2aff7b59 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -269,7 +269,7 @@ static int
lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
{
int len = 0;
- int cnt, i, j, found, posted, low;
+ int i, j, found, posted, low;
uint32_t phys, raw_index, getidx;
struct lpfc_hbq_init *hip;
struct hbq_s *hbqs;
@@ -279,7 +279,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
if (phba->sli_rev != 3)
return 0;
- cnt = LPFC_HBQINFO_SIZE;
+
spin_lock_irq(&phba->hbalock);
/* toggle between multiple hbqs, if any */
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1a6fe524940d..6977027979be 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -78,7 +78,8 @@ struct lpfc_nodelist {
struct list_head nlp_listp;
struct lpfc_name nlp_portname;
struct lpfc_name nlp_nodename;
- uint32_t nlp_flag; /* entry flags */
+ uint32_t nlp_flag; /* entry flags */
+ uint32_t nlp_add_flag; /* additional flags */
uint32_t nlp_DID; /* FC D_ID of entry */
uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
uint16_t nlp_type;
@@ -157,6 +158,9 @@ struct lpfc_node_rrq {
#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
+/* Defines for nlp_add_flag (uint32) */
+#define NLP_IN_DEV_LOSS 0x00000001 /* Dev Loss processing in progress */
+
/* ndlp usage management macros */
#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
& NLP_USG_NODE_ACT_BIT) \
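The new nlp_add_flag word and its NLP_IN_DEV_LOSS bit are driven by the lpfc_els.c and lpfc_hbadisc.c hunks below: the bit is set when a dev-loss event is queued and must be cleared on every exit path of the handler, otherwise unsolicited ELS traffic for that node stays blocked. A minimal sketch (not driver code) of that set-then-always-clear discipline:

#include <linux/types.h>
#include <linux/errno.h>

#define NLP_IN_DEV_LOSS 0x00000001

struct node {
	u32 add_flag;
	void *rport;
};

static int dev_loss_handler(struct node *n)
{
	int ret = 0;

	if (!n->rport) {
		ret = -ENODEV;
		goto out;		/* early exits still clear the flag */
	}

	/* dev-loss processing would go here */

out:
	n->add_flag &= ~NLP_IN_DEV_LOSS;
	return ret;
}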
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7a5d81a65be8..4c25485aa934 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1084,7 +1084,8 @@ stop_rr_fcf_flogi:
* accessing it.
*/
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
-
+ if (!prsp)
+ goto out;
sp = prsp->virt + sizeof(uint32_t);
/* FLOGI completes successfully */
@@ -1828,7 +1829,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *prsp;
- int disc, rc, did, type;
+ int disc, rc;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1873,10 +1874,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- /* ndlp could be freed in DSM, save these values now */
- type = ndlp->nlp_type;
- did = ndlp->nlp_DID;
-
if (irsp->ulpStatus) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -2269,8 +2266,6 @@ lpfc_adisc_done(struct lpfc_vport *vport)
void
lpfc_more_adisc(struct lpfc_vport *vport)
{
- int sentadisc;
-
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
/* Continue discovery with <num_disc_nodes> ADISCs to go */
@@ -2283,7 +2278,7 @@ lpfc_more_adisc(struct lpfc_vport *vport)
if (vport->fc_flag & FC_NLP_MORE) {
lpfc_set_disctmo(vport);
/* go thru NPR nodes and issue any remaining ELS ADISCs */
- sentadisc = lpfc_els_disc_adisc(vport);
+ lpfc_els_disc_adisc(vport);
}
if (!vport->num_disc_nodes)
lpfc_adisc_done(vport);
@@ -3027,10 +3022,9 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- uint32_t cmd, did, retry;
+ uint32_t cmd, retry;
spin_lock_irq(shost->host_lock);
- did = ndlp->nlp_DID;
cmd = ndlp->nlp_last_elscmd;
ndlp->nlp_last_elscmd = 0;
@@ -5288,10 +5282,9 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
IOCB_t *icmd;
RNID *rn;
struct ls_rjt stat;
- uint32_t cmd, did;
+ uint32_t cmd;
icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6693,6 +6686,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvFrame++;
+ /*
+ * Do not process any unsolicited ELS commands
+ * if the ndlp is in DEV_LOSS
+ */
+ if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS)
+ goto dropit;
+
elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->vport = vport;
@@ -7514,6 +7514,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
sp = prsp->virt + sizeof(uint32_t);
fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
memcpy(&vport->fabric_portname, &sp->portName,
@@ -8187,9 +8189,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
list_del(&sglq_entry->list);
ndlp = sglq_entry->ndlp;
sglq_entry->ndlp = NULL;
+ spin_lock(&pring->ring_lock);
list_add_tail(&sglq_entry->list,
&phba->sli4_hba.lpfc_sgl_list);
sglq_entry->state = SGL_FREED;
+ spin_unlock(&pring->ring_lock);
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_set_rrq_active(phba, ndlp,
@@ -8208,12 +8212,15 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
+ spin_lock(&pring->ring_lock);
sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+ spin_unlock(&pring->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
sglq_entry->state = SGL_XRI_ABORTED;
+ spin_unlock(&pring->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
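Both the FLOGI and FDISC completion paths now test the buffer returned by list_get_first() before touching it, since an empty list yields NULL. A self-contained illustration of the same check (types and names are illustrative, not lpfc's):

#include <linux/list.h>
#include <linux/types.h>

struct rsp_buf {
	void *virt;
	struct list_head list;
};

static void *first_rsp_payload(struct list_head *head)
{
	struct rsp_buf *prsp;

	prsp = list_first_entry_or_null(head, struct rsp_buf, list);
	if (!prsp)
		return NULL;			/* empty list: nothing to parse */
	return prsp->virt + sizeof(uint32_t);	/* skip the command word */
}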
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2a17e31265b8..5452f1f4220e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -150,9 +150,30 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
/* If the WWPN of the rport and ndlp don't match, ignore it */
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "6789 rport name %lx != node port name %lx",
+ (unsigned long)rport->port_name,
+ (unsigned long)wwn_to_u64(
+ ndlp->nlp_portname.u.wwn));
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
put_device(&rport->dev);
return;
}
+
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return;
}
evtp = &ndlp->dev_loss_evt;
@@ -161,6 +182,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS;
spin_lock_irq(&phba->hbalock);
/* We need to hold the node by incrementing the reference
@@ -201,8 +223,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
rport = ndlp->rport;
- if (!rport)
+ if (!rport) {
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
return fcf_inuse;
+ }
rdata = rport->dd_data;
name = (uint8_t *) &ndlp->nlp_portname;
@@ -235,6 +259,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
if (put_node)
lpfc_nlp_put(ndlp);
if (put_rport)
@@ -250,6 +275,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID);
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
return fcf_inuse;
}
@@ -259,6 +285,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
if (put_node)
lpfc_nlp_put(ndlp);
if (put_rport)
@@ -269,6 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
/* flush the target */
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
@@ -297,6 +325,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
if (put_node)
lpfc_nlp_put(ndlp);
if (put_rport)
@@ -995,7 +1024,6 @@ lpfc_linkup(struct lpfc_hba *phba)
struct lpfc_vport **vports;
int i;
- lpfc_cleanup_wt_rrqs(phba);
phba->link_state = LPFC_LINK_UP;
/* Unblock fabric iocbs if they are blocked */
@@ -2042,7 +2070,8 @@ lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
* returns:
* 0=success 1=failure
**/
-int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
+static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
+ uint16_t fcf_index,
struct fcf_record *new_fcf_record)
{
uint16_t current_fcf_pri;
@@ -2146,7 +2175,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
uint16_t vlan_id;
- uint32_t seed;
bool select_new_fcf;
int rc;
@@ -2383,9 +2411,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcf.fcf_flag |= FCF_AVAILABLE;
/* Setup initial running random FCF selection count */
phba->fcf.eligible_fcf_cnt = 1;
- /* Seeding the random number generator for random selection */
- seed = (uint32_t)(0xFFFFFFFF & jiffies);
- prandom_seed(seed);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -2678,7 +2703,7 @@ out:
*
* This function handles completion of init vfi mailbox command.
*/
-void
+static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
@@ -4438,7 +4463,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* This function will issue an ELS LOGO command after completing
* the UNREG_RPI.
**/
-void
+static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -5006,7 +5031,6 @@ lpfc_disc_start(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
uint32_t num_sent;
uint32_t clear_la_pending;
- int did_changed;
if (!lpfc_is_link_up(phba)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
@@ -5025,11 +5049,6 @@ lpfc_disc_start(struct lpfc_vport *vport)
lpfc_set_disctmo(vport);
- if (vport->fc_prevDID == vport->fc_myDID)
- did_changed = 0;
- else
- did_changed = 1;
-
vport->fc_prevDID = vport->fc_myDID;
vport->num_disc_nodes = 0;
@@ -6318,7 +6337,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
uint8_t *buff,
uint32_t size)
{
- uint32_t offset = 0, rec_length;
+ uint32_t offset = 0;
uint8_t *rec_ptr;
/*
@@ -6345,8 +6364,6 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
}
offset += 4;
- rec_length = buff[offset + 1];
-
/* Read FCoE param record */
rec_ptr = lpfc_get_rec_conf23(&buff[offset],
size - offset, FCOE_PARAM_TYPE);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a5769a9960ac..0b2c53af85c7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -306,10 +306,10 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dist = dist_char[prg->dist];
if ((prg->dist == 3) && (prg->num == 0))
- sprintf(phba->OptionROMVersion, "%d.%d%d",
+ snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
prg->ver, prg->rev, prg->lev);
else
- sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
+ snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
prg->ver, prg->rev, prg->lev,
dist, prg->num);
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -649,7 +649,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
* 0 - success
* Any other value - error
**/
-int
+static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
@@ -750,7 +750,7 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
* 0 - success
* Any other value - error
**/
-int
+static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
LPFC_MBOXQ_t *pmb;
@@ -988,9 +988,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
LIST_HEAD(aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
lpfc_hba_free_post_buf(phba);
lpfc_hba_clean_txcmplq(phba);
+ pring = &psli->ring[LPFC_ELS_RING];
/* At this point in time the HBA is either reset or DOA. Either
* way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
@@ -1008,8 +1011,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
sglq_entry->state = SGL_FREED;
+ spin_lock(&pring->ring_lock);
list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
&phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
/* abts_scsi_buf_list_lock required because worker thread uses this
* list.
@@ -3047,6 +3052,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
LIST_HEAD(els_sgl_list);
LIST_HEAD(scsi_sgl_list);
int rc;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/*
* update on pci function's els xri-sgl list
@@ -3087,7 +3093,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
list_add_tail(&sglq_entry->list, &els_sgl_list);
}
spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrinked */
@@ -3097,7 +3105,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
"%d to %d\n", phba->sli4_hba.els_xri_cnt,
els_xri_cnt);
spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
/* release extra els sgls from list */
for (i = 0; i < xri_cnt; i++) {
@@ -3110,7 +3120,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
}
}
spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
} else
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3165,9 +3177,11 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
for (i = 0; i < scsi_xri_cnt; i++) {
list_remove_head(&scsi_sgl_list, psb,
struct lpfc_scsi_buf, list);
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
- psb->dma_handle);
- kfree(psb);
+ if (psb) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ }
}
spin_lock_irq(&phba->scsi_buf_list_get_lock);
phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
@@ -3550,7 +3564,7 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
* list, and then worker thread shall be waked up for processing from the
* worker thread context.
**/
-void
+static void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
@@ -5680,10 +5694,13 @@ static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
LIST_HEAD(sglq_list);
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/* Retrieve all els sgls from driver list */
spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
/* Now free the sgl list */
@@ -5848,16 +5865,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
if (!dmabuf)
return NULL;
- dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- LPFC_HDR_TEMPLATE_SIZE,
- &dmabuf->phys,
- GFP_KERNEL);
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+ LPFC_HDR_TEMPLATE_SIZE,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
rpi_hdr = NULL;
goto err_free_dmabuf;
}
- memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
rpi_hdr = NULL;
goto err_free_coherent;
@@ -6246,14 +6261,11 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
}
/* Allocate memory for SLI-2 structures */
- phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
- SLI2_SLIM_SIZE,
- &phba->slim2p.phys,
- GFP_KERNEL);
+ phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ &phba->slim2p.phys, GFP_KERNEL);
if (!phba->slim2p.virt)
goto out_iounmap;
- memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
phba->mbox_ext = (phba->slim2p.virt +
offsetof(struct lpfc_sli2_slim, mbx_ext_words));
@@ -6618,15 +6630,12 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
* plus an alignment restriction of 16 bytes.
*/
bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
- dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- bmbx_size,
- &dmabuf->phys,
- GFP_KERNEL);
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
}
- memset(dmabuf->virt, 0, bmbx_size);
/*
* Initialize the bootstrap mailbox pointers now so that the register
@@ -6710,7 +6719,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_mbx_get_func_cfg *get_func_cfg;
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
- uint32_t desc_count;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -6841,7 +6849,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
/* search for fc_fcoe resource descriptor */
get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
- desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
@@ -7417,7 +7424,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0523 Failed setup of fast-path EQ "
- "(%d), rc = 0x%x\n", fcp_eqidx, rc);
+ "(%d), rc = 0x%x\n", fcp_eqidx,
+ (uint32_t)rc);
goto out_destroy_hba_eq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7448,7 +7456,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0527 Failed setup of fast-path FCP "
- "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+ "CQ (%d), rc = 0x%x\n", fcp_cqidx,
+ (uint32_t)rc);
goto out_destroy_fcp_cq;
}
@@ -7488,7 +7497,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0535 Failed setup of fast-path FCP "
- "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+ "WQ (%d), rc = 0x%x\n", fcp_wqidx,
+ (uint32_t)rc);
goto out_destroy_fcp_wq;
}
@@ -7521,7 +7531,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0529 Failed setup of slow-path mailbox CQ: "
- "rc = 0x%x\n", rc);
+ "rc = 0x%x\n", (uint32_t)rc);
goto out_destroy_fcp_wq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7541,7 +7551,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0531 Failed setup of slow-path ELS CQ: "
- "rc = 0x%x\n", rc);
+ "rc = 0x%x\n", (uint32_t)rc);
goto out_destroy_mbx_cq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7585,7 +7595,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0537 Failed setup of slow-path ELS WQ: "
- "rc = 0x%x\n", rc);
+ "rc = 0x%x\n", (uint32_t)rc);
goto out_destroy_mbx_wq;
}
@@ -7617,7 +7627,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0541 Failed setup of Receive Queue: "
- "rc = 0x%x\n", rc);
+ "rc = 0x%x\n", (uint32_t)rc);
goto out_destroy_fcp_wq;
}
@@ -7896,7 +7906,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mboxq;
uint32_t rc = 0, if_type;
uint32_t shdr_status, shdr_add_status;
- uint32_t rdy_chk, num_resets = 0, reset_again = 0;
+ uint32_t rdy_chk;
+ uint32_t port_reset = 0;
union lpfc_sli4_cfg_shdr *shdr;
struct lpfc_register reg_data;
uint16_t devid;
@@ -7936,9 +7947,42 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
}
break;
case LPFC_SLI_INTF_IF_TYPE_2:
- for (num_resets = 0;
- num_resets < MAX_IF_TYPE_2_RESETS;
- num_resets++) {
+wait:
+ /*
+ * Poll the Port Status Register and wait for RDY for
+ * up to 30 seconds. If the port doesn't respond, treat
+ * it as an error.
+ */
+ for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) {
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.
+ STATUSregaddr, &reg_data.word0)) {
+ rc = -ENODEV;
+ goto out;
+ }
+ if (bf_get(lpfc_sliport_status_rdy, &reg_data))
+ break;
+ msleep(20);
+ }
+
+ if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
+ phba->work_status[0] = readl(
+ phba->sli4_hba.u.if_type2.ERR1regaddr);
+ phba->work_status[1] = readl(
+ phba->sli4_hba.u.if_type2.ERR2regaddr);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2890 Port not ready, port status reg "
+ "0x%x error 1=0x%x, error 2=0x%x\n",
+ reg_data.word0,
+ phba->work_status[0],
+ phba->work_status[1]);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ if (!port_reset) {
+ /*
+ * Reset the port now
+ */
reg_data.word0 = 0;
bf_set(lpfc_sliport_ctrl_end, &reg_data,
LPFC_SLIPORT_LITTLE_ENDIAN);
@@ -7949,64 +7993,16 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
/* flush */
pci_read_config_word(phba->pcidev,
PCI_DEVICE_ID, &devid);
- /*
- * Poll the Port Status Register and wait for RDY for
- * up to 10 seconds. If the port doesn't respond, treat
- * it as an error. If the port responds with RN, start
- * the loop again.
- */
- for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
- msleep(10);
- if (lpfc_readl(phba->sli4_hba.u.if_type2.
- STATUSregaddr, &reg_data.word0)) {
- rc = -ENODEV;
- goto out;
- }
- if (bf_get(lpfc_sliport_status_rn, &reg_data))
- reset_again++;
- if (bf_get(lpfc_sliport_status_rdy, &reg_data))
- break;
- }
-
- /*
- * If the port responds to the init request with
- * reset needed, delay for a bit and restart the loop.
- */
- if (reset_again && (rdy_chk < 1000)) {
- msleep(10);
- reset_again = 0;
- continue;
- }
- /* Detect any port errors. */
- if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
- (rdy_chk >= 1000)) {
- phba->work_status[0] = readl(
- phba->sli4_hba.u.if_type2.ERR1regaddr);
- phba->work_status[1] = readl(
- phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2890 Port error detected during port "
- "reset(%d): wait_tmo:%d ms, "
- "port status reg 0x%x, "
- "error 1=0x%x, error 2=0x%x\n",
- num_resets, rdy_chk*10,
- reg_data.word0,
- phba->work_status[0],
- phba->work_status[1]);
- rc = -ENODEV;
- }
-
- /*
- * Terminate the outer loop provided the Port indicated
- * ready within 10 seconds.
- */
- if (rdy_chk < 1000)
- break;
+ port_reset = 1;
+ msleep(20);
+ goto wait;
+ } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+ rc = -ENODEV;
+ goto out;
}
- /* delay driver action following IF_TYPE_2 function reset */
- msleep(100);
break;
+
case LPFC_SLI_INTF_IF_TYPE_1:
default:
break;
@@ -8014,11 +8010,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
out:
/* Catch the not-ready port failure after a port reset. */
- if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3317 HBA not functional: IP Reset Failed "
- "after (%d) retries, try: "
- "echo fw_reset > board_mode\n", num_resets);
+ "try: echo fw_reset > board_mode\n");
rc = -ENODEV;
}
@@ -8211,9 +8206,9 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-3 interface specs. The kernel function pci_enable_msix() is
- * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
- * invoked, enables either all or nothing, depending on the current
+ * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
+ * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
+ * once invoked, enables either all or nothing, depending on the current
* availability of PCI vector resources. The device driver is responsible
* for calling the individual request_irq() to register each MSI-X vector
* with an interrupt handler, which is done in this function. Note that
@@ -8237,8 +8232,8 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
phba->msix_entries[i].entry = i;
/* Configure MSI-X capability structure */
- rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
- ARRAY_SIZE(phba->msix_entries));
+ rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
+ LPFC_MSIX_VECTORS);
if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0420 PCI enable MSI-X failed (%d)\n", rc);
@@ -8775,16 +8770,14 @@ out:
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
- * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
- * enables either all or nothing, depending on the current availability of
- * PCI vector resources. The device driver is responsible for calling the
- * individual request_irq() to register each MSI-X vector with a interrupt
- * handler, which is done in this function. Note that later when device is
- * unloading, the driver should always call free_irq() on all MSI-X vectors
- * it has done request_irq() on before calling pci_disable_msix(). Failure
- * to do so results in a BUG_ON() and a device will be left with MSI-X
- * enabled and leaks its vectors.
+ * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
+ * is called to enable the MSI-X vectors. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with an interrupt handler, which is done in this function. Note that
+ * later when device is unloading, the driver should always call free_irq()
+ * on all MSI-X vectors it has done request_irq() on before calling
+ * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
+ * will be left with MSI-X enabled and leaks its vectors.
*
* Return codes
* 0 - successful
@@ -8805,17 +8798,14 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->sli4_hba.msix_entries[index].entry = index;
vectors++;
}
-enable_msix_vectors:
- rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
- vectors);
- if (rc > 1) {
- vectors = rc;
- goto enable_msix_vectors;
- } else if (rc) {
+ rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
+ 2, vectors);
+ if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
goto vec_fail_out;
}
+ vectors = rc;
/* Log MSI-X vector assignment */
for (index = 0; index < vectors; index++)
@@ -8828,7 +8818,8 @@ enable_msix_vectors:
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
memset(&phba->sli4_hba.handler_name[index], 0, 16);
- sprintf((char *)&phba->sli4_hba.handler_name[index],
+ snprintf((char *)&phba->sli4_hba.handler_name[index],
+ LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
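The lpfc_init.c hunks (and the matching ones in lpfc_els.c and lpfc_sli.c) wrap every splice of lpfc_sgl_list with the ELS ring's ring_lock, nested inside the already-held hbalock. A generic sketch of that nesting (the helper is illustrative; only the lock order mirrors the hunks):

#include <linux/spinlock.h>
#include <linux/list.h>

static void move_sgl_list(spinlock_t *hbalock, spinlock_t *ring_lock,
			  struct list_head *src, struct list_head *dst)
{
	unsigned long iflag;

	spin_lock_irqsave(hbalock, iflag);
	spin_lock(ring_lock);		/* nested plain lock: irqs already off */
	list_splice_init(src, dst);
	spin_unlock(ring_lock);
	spin_unlock_irqrestore(hbalock, iflag);
}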
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1f292e29d566..06241f590c1e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1811,12 +1811,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
* page, this is used as a priori size of SLI4_PAGE_SIZE for
* the later DMA memory free.
*/
- viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
- &phyaddr, GFP_KERNEL);
+ viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE, &phyaddr,
+ GFP_KERNEL);
/* If the allocation fails, proceed with whatever we have */
if (!viraddr)
break;
- memset(viraddr, 0, SLI4_PAGE_SIZE);
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index c342f6afd747..5cc1103d811e 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1031,6 +1031,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7862c5540861..b99399fe2548 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -306,7 +306,7 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
* depth for a scsi device. This function sets the queue depth to the new
* value and sends an event out to log the queue depth change.
**/
-int
+static int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
@@ -380,12 +380,14 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
unsigned long flags;
uint32_t evt_posted;
+ unsigned long expires;
spin_lock_irqsave(&phba->hbalock, flags);
atomic_inc(&phba->num_rsrc_err);
phba->last_rsrc_error_time = jiffies;
- if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+ expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
+ if (time_after(expires, jiffies)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
@@ -741,7 +743,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
*
* Returns: 0 = failure, non-zero number of successfully posted buffers.
**/
-int
+static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
struct list_head *post_sblist, int sb_count)
{
@@ -2965,7 +2967,7 @@ err:
* on the specified data using a CRC algorithmn
* using crc_t10dif.
*/
-uint16_t
+static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
uint16_t crc = 0;
@@ -2981,7 +2983,7 @@ lpfc_bg_crc(uint8_t *data, int count)
* on the specified data using a CSUM algorithmn
* using ip_compute_csum.
*/
-uint16_t
+static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
uint16_t ret;
@@ -2994,7 +2996,7 @@ lpfc_bg_csum(uint8_t *data, int count)
* This function examines the protection data to try to determine
* what type of T10-DIF error occurred.
*/
-void
+static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
struct scatterlist *sgpe; /* s/g prot entry */
@@ -3464,7 +3466,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled)
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
+ lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
return 0;
}
@@ -3604,6 +3606,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
*/
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+ /*
+ * If the OAS driver feature is enabled and the lun is enabled for
+ * OAS, set the oas iocb related flags.
+ */
+ if ((phba->cfg_fof) && ((struct lpfc_device_data *)
+ scsi_cmnd->device->hostdata)->oas_enabled)
+ lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+
return 0;
err:
if (lpfc_cmd->seg_cnt)
@@ -4874,6 +4884,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (iocb->iocb_flag & LPFC_IO_FOF)
+ abtsiocb->iocb_flag |= LPFC_IO_FOF;
if (lpfc_is_link_up(phba))
icmd->ulpCommand = CMD_ABORT_XRI_CN;
@@ -5327,7 +5339,13 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0722 Target Reset rport failure: rdata x%p\n", rdata);
- return FAILED;
+ spin_lock_irq(shost->host_lock);
+ pnode->nlp_flag &= ~NLP_NPR_ADISC;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_TGT);
+ return FAST_IO_FAIL;
}
scsi_event.event_type = FC_REG_SCSI_EVENT;
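The lpfc_scsi.c ramp-down hunk converts an open-coded jiffies comparison into time_after(), which stays correct across jiffies wrap-around. A small sketch of that test (the helper name is illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

static bool still_in_ramp_down_window(unsigned long last, unsigned long interval)
{
	unsigned long expires = last + interval;

	return time_after(expires, jiffies);	/* wrap-safe "expires > jiffies" */
}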
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 32ada0505576..207a43d952fa 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -187,7 +187,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
struct lpfc_mqe *temp_mqe;
struct lpfc_register doorbell;
- uint32_t host_index;
/* sanity check on queue memory */
if (unlikely(!q))
@@ -202,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
q->phba->mbox = (MAILBOX_t *)temp_mqe;
/* Update the host index before invoking device */
- host_index = q->host_index;
q->host_index = ((q->host_index + 1) % q->entry_count);
/* Ring Doorbell */
@@ -786,42 +784,6 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
/**
- * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
- * @phba: Pointer to HBA context object.
- *
- * Remove all rrqs from the phba->active_rrq_list and free them by
- * calling __lpfc_clr_active_rrq
- *
- **/
-void
-lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
-{
- struct lpfc_node_rrq *rrq;
- struct lpfc_node_rrq *nextrrq;
- unsigned long next_time;
- unsigned long iflags;
- LIST_HEAD(rrq_list);
-
- if (phba->sli_rev != LPFC_SLI_REV4)
- return;
- spin_lock_irqsave(&phba->hbalock, iflags);
- phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
- list_splice_init(&phba->active_rrq_list, &rrq_list);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
-
- list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
- list_del(&rrq->list);
- lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- }
- if ((!list_empty(&phba->active_rrq_list)) &&
- (!(phba->pport->load_flag & FC_UNLOADING)))
-
- mod_timer(&phba->rrq_tmr, next_time);
-}
-
-
-/**
* lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @ndlp: Targets nodelist pointer for this exchange.
@@ -937,7 +899,7 @@ out:
* @phba: Pointer to HBA context object.
* @piocb: Pointer to the iocbq.
*
- * This function is called with hbalock held. This function
+ * This function is called with the ring lock held. This function
* gets a new driver sglq object from the sglq list. If the
* list is not empty then it is successful, it returns pointer to the newly
* allocated sglq object else it returns NULL.
@@ -1053,10 +1015,12 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
spin_unlock_irqrestore(
&phba->sli4_hba.abts_sgl_list_lock, iflag);
} else {
+ spin_lock_irqsave(&pring->ring_lock, iflag);
sglq->state = SGL_FREED;
sglq->ndlp = NULL;
list_add_tail(&sglq->list,
&phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
/* Check if TXQ queue needs to be serviced */
if (!list_empty(&pring->txq))
@@ -2469,11 +2433,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t * irsp;
WORD5 * w5p;
uint32_t Rctl, Type;
- uint32_t match;
struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
- match = 0;
irsp = &(saveq->iocb);
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
@@ -2899,7 +2861,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
void lpfc_poll_eratt(unsigned long ptr)
{
struct lpfc_hba *phba;
- uint32_t eratt = 0, rem;
+ uint32_t eratt = 0;
uint64_t sli_intr, cnt;
phba = (struct lpfc_hba *)ptr;
@@ -2914,7 +2876,7 @@ void lpfc_poll_eratt(unsigned long ptr)
cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
/* 64-bit integer division not supported on 32-bit x86 - use do_div */
- rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+ do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
phba->sli.slistat.sli_ips = cnt;
phba->sli.slistat.sli_prev_intr = sli_intr;
@@ -4864,15 +4826,12 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
* mailbox command.
*/
dma_size = *vpd_size;
- dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- dma_size,
- &dmabuf->phys,
- GFP_KERNEL);
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
}
- memset(dmabuf->virt, 0, dma_size);
/*
* The SLI4 implementation of READ_REV conflicts at word1,
@@ -5990,9 +5949,6 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
curr_blks++;
}
- /* Calculate the total requested length of the dma memory. */
- req_len = curr_blks * sizeof(uint16_t);
-
/*
* Calculate the size of an embedded mailbox. The uint32_t
* accounts for extents-specific word.
@@ -6101,14 +6057,18 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
struct lpfc_sglq *sglq_entry_first = NULL;
int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
int last_xritag = NO_XRI;
+ struct lpfc_sli_ring *pring;
LIST_HEAD(prep_sgl_list);
LIST_HEAD(blck_sgl_list);
LIST_HEAD(allc_sgl_list);
LIST_HEAD(post_sgl_list);
LIST_HEAD(free_sgl_list);
+ pring = &phba->sli.ring[LPFC_ELS_RING];
spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
total_cnt = phba->sli4_hba.els_xri_cnt;
@@ -6210,8 +6170,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* push els sgls posted to the available list */
if (!list_empty(&post_sgl_list)) {
spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&post_sgl_list,
&phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -6797,13 +6759,16 @@ void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
- MAILBOX_t *mb = &pmbox->u.mb;
+ MAILBOX_t *mb = NULL;
+
struct lpfc_sli *psli = &phba->sli;
/* If the mailbox completed, process the completion and return */
if (lpfc_sli4_process_missed_mbox_completions(phba))
return;
+ if (pmbox != NULL)
+ mb = &pmbox->u.mb;
/* Check the pmbox pointer first. There is a race condition
* between the mbox timeout handler getting executed in the
* worklist and the mailbox actually completing. When this
@@ -8138,7 +8103,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
*
* Return: index into SLI4 fast-path FCP queue index.
**/
-static inline uint32_t
+static inline int
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
struct lpfc_vector_map_info *cpup;
@@ -8152,7 +8117,6 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
cpup += cpu;
return cpup->channel_id;
}
- chann = cpu;
}
chann = atomic_add_return(1, &phba->fcp_qidx);
chann = (chann % phba->cfg_fcp_io_channel);
@@ -8784,6 +8748,37 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
return 0;
}
+int
+lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb)
+{
+ uint32_t idx;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ /*
+ * fcp_wqidx should already be set up based on what
+ * completion queue we want to use.
+ */
+ if (!(phba->cfg_fof) ||
+ (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return LPFC_HBA_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return LPFC_HBA_ERROR;
+ idx = 0;
+ piocb->fcp_wqidx = idx;
+ ring_number = LPFC_FCP_OAS_RING;
+ }
+ }
+ }
+ return ring_number;
+}
+
/**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
@@ -8809,61 +8804,42 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
int rc, idx;
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (piocb->iocb_flag & LPFC_IO_FCP) {
- if (!phba->cfg_fof || (!(piocb->iocb_flag &
- LPFC_IO_OAS))) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- piocb->fcp_wqidx = idx;
- ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
- } else {
- if (unlikely(!phba->sli4_hba.oas_wq))
- return IOCB_ERROR;
- idx = 0;
- piocb->fcp_wqidx = 0;
- ring_number = LPFC_FCP_OAS_RING;
- }
- pring = &phba->sli.ring[ring_number];
- spin_lock_irqsave(&pring->ring_lock, iflags);
- rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
- flag);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
+ if (unlikely(ring_number == LPFC_HBA_ERROR))
+ return IOCB_ERROR;
+ idx = piocb->fcp_wqidx;
- if (lpfc_fcp_look_ahead) {
- fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+ pring = &phba->sli.ring[ring_number];
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
- if (atomic_dec_and_test(&fcp_eq_hdl->
- fcp_eq_in_use)) {
+ if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
+ fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
- /* Get associated EQ with this index */
- fpeq = phba->sli4_hba.hba_eq[idx];
+ if (atomic_dec_and_test(&fcp_eq_hdl->
+ fcp_eq_in_use)) {
- /* Turn off interrupts from this EQ */
- lpfc_sli4_eq_clr_intr(fpeq);
+ /* Get associated EQ with this index */
+ fpeq = phba->sli4_hba.hba_eq[idx];
- /*
- * Process all the events on FCP EQ
- */
- while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- lpfc_sli4_hba_handle_eqe(phba,
- eqe, idx);
- fpeq->EQ_processed++;
- }
+ /* Turn off interrupts from this EQ */
+ lpfc_sli4_eq_clr_intr(fpeq);
- /* Always clear and re-arm the EQ */
- lpfc_sli4_eq_release(fpeq,
- LPFC_QUEUE_REARM);
+ /*
+ * Process all the events on FCP EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba,
+ eqe, idx);
+ fpeq->EQ_processed++;
}
- atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
- }
- } else {
- pring = &phba->sli.ring[ring_number];
- spin_lock_irqsave(&pring->ring_lock, iflags);
- rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
- flag);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ /* Always clear and re-arm the EQ */
+ lpfc_sli4_eq_release(fpeq,
+ LPFC_QUEUE_REARM);
+ }
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
}
} else {
/* For now, SLI2/3 will still use hbalock */
@@ -9746,6 +9722,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
+ int ring_number;
int retval;
unsigned long iflags;
@@ -9786,6 +9763,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
if (cmdiocb->iocb_flag & LPFC_IO_FCP)
abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->iocb_flag & LPFC_IO_FOF)
+ abtsiocbp->iocb_flag |= LPFC_IO_FOF;
if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
@@ -9802,6 +9781,11 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->iotag);
if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number =
+ lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
+ if (unlikely(ring_number == LPFC_HBA_ERROR))
+ return 0;
+ pring = &phba->sli.ring[ring_number];
/* Note: both hbalock and ring_lock need to be set here */
spin_lock_irqsave(&pring->ring_lock, iflags);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
@@ -10099,6 +10083,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
if (iocbq->iocb_flag & LPFC_IO_FCP)
abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->iocb_flag & LPFC_IO_FOF)
+ abtsiocb->iocb_flag |= LPFC_IO_FOF;
if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@@ -10146,7 +10132,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *abtsiocbq;
+ struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *iocbq;
IOCB_t *icmd;
int sum, i, ret_val;
@@ -10198,8 +10186,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
if (iocbq->iocb_flag & LPFC_IO_FCP)
abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->iocb_flag & LPFC_IO_FOF)
+ abtsiocbq->iocb_flag |= LPFC_IO_FOF;
- if (lpfc_is_link_up(phba))
+ lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+ ndlp = lpfc_cmd->rdata->pnode;
+
+ if (lpfc_is_link_up(phba) &&
+ (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
else
abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
@@ -12611,6 +12605,9 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
* Process all the event on FCP fast-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ if (eqe == NULL)
+ break;
+
lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
if (!(++ecount % fpeq->entry_repost))
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
@@ -12760,14 +12757,13 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
goto out_fail;
- dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- hw_page_size, &dmabuf->phys,
- GFP_KERNEL);
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+ hw_page_size, &dmabuf->phys,
+ GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
}
- memset(dmabuf->virt, 0, hw_page_size);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
@@ -12845,7 +12841,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
@@ -12931,7 +12927,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
struct lpfc_mbx_eq_create *eq_create;
@@ -13053,7 +13049,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
@@ -13394,7 +13390,7 @@ out:
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
struct lpfc_queue *cq, uint32_t subtype)
{
@@ -13630,7 +13626,7 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
@@ -13895,7 +13891,7 @@ out:
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
LPFC_MBOXQ_t *mbox;
@@ -13951,7 +13947,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
LPFC_MBOXQ_t *mbox;
@@ -14005,7 +14001,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
LPFC_MBOXQ_t *mbox;
@@ -14059,7 +14055,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
LPFC_MBOXQ_t *mbox;
@@ -14112,7 +14108,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
-uint32_t
+int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq)
{
@@ -14252,7 +14248,6 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
"2511 POST_SGL mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
- rc = -ENXIO;
}
return 0;
}
@@ -14270,7 +14265,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
* A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
* LPFC_RPI_ALLOC_ERROR if no rpis are available.
**/
-uint16_t
+static uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
unsigned long xri;
@@ -14300,7 +14295,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
* This routine is invoked to release an xri to the pool of
* available rpis maintained by the driver.
**/
-void
+static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
@@ -14720,7 +14715,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
* the driver uses this time stamp to indicate if any received sequences have
* timed out.
**/
-void
+static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
struct lpfc_dmabuf *h_buf;
@@ -15019,7 +15014,7 @@ uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
uint16_t xri)
{
- int i;
+ uint16_t i;
for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
if (xri == phba->sli4_hba.xri_ids[i])
@@ -15189,7 +15184,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* unsolicited sequence has been aborted. After that, it will issue a basic
* accept to accept the abort.
**/
-void
+static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
struct hbq_dmabuf *dmabuf)
{
@@ -15734,7 +15729,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* This routine is invoked to release an rpi to the pool of
* available rpis maintained by the driver.
**/
-void
+static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
@@ -16172,7 +16167,7 @@ fail_fcf_read:
* returns:
* 1=success 0=failure
**/
-int
+static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
uint16_t next_fcf_pri;
@@ -16403,7 +16398,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
* command. If the mailbox command returned failure, it will try to stop the
* FCF rediscover wait timer.
**/
-void
+static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
@@ -16956,7 +16951,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
- int txq_cnt = 0;
+ uint32_t txq_cnt = 0;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_for_each_entry(piocbq, &pring->txq, list) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index edb48832c39b..4a01452415cf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -79,6 +79,7 @@ struct lpfc_iocbq {
#define LPFC_FIP_ELS_ID_SHIFT 14
#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
+#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 7f50aa04d66a..22ceb2b05ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -670,22 +670,22 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
-uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
-uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
+int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
+int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
-uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
-uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, struct lpfc_queue *, uint32_t);
void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
-uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *);
int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41675c1193e7..89413add2252 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "10.2.8001.0."
+#define LPFC_DRIVER_VERSION "10.4.8000.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 32166c2c7854..a49914de4b95 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.803.01.00-rc1"
-#define MEGASAS_RELDATE "Mar. 10, 2014"
-#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014"
+#define MEGASAS_VERSION "06.805.06.00-rc1"
+#define MEGASAS_RELDATE "Sep. 4, 2014"
+#define MEGASAS_EXT_VERSION "Thu. Sep. 4 17:00:00 PDT 2014"
/*
* Device IDs
@@ -105,6 +105,9 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
+#define MFI_STATE_FORCE_OCR 0x00000080
+#define MFI_STATE_DMADONE 0x00000008
+#define MFI_STATE_CRASH_DUMP_DONE 0x00000004
#define MFI_RESET_REQUIRED 0x00000001
#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
@@ -191,6 +194,9 @@
#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
#define MR_DCMD_PD_LIST_QUERY 0x02010100
+#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100
+#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600)
+
/*
* Global functions
*/
@@ -264,6 +270,25 @@ enum MFI_STAT {
};
/*
+ * Crash dump related defines
+ */
+#define MAX_CRASH_DUMP_SIZE 512
+#define CRASH_DMA_BUF_SIZE (1024 * 1024)
+
+enum MR_FW_CRASH_DUMP_STATE {
+ UNAVAILABLE = 0,
+ AVAILABLE = 1,
+ COPYING = 2,
+ COPIED = 3,
+ COPY_ERROR = 4,
+};
+
+enum _MR_CRASH_BUF_STATUS {
+ MR_CRASH_BUF_TURN_OFF = 0,
+ MR_CRASH_BUF_TURN_ON = 1,
+};
+
+/*
* Number of mailbox bytes in DCMD message frame
*/
#define MFI_MBOX_SIZE 12
@@ -365,7 +390,6 @@ enum MR_LD_QUERY_TYPE {
#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
#define MR_EVT_LD_OFFLINE 0x00fc
#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
-#define MAX_LOGICAL_DRIVES 64
enum MR_PD_STATE {
MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
@@ -443,14 +467,14 @@ struct MR_LD_LIST {
u8 state;
u8 reserved[3];
u64 size;
- } ldList[MAX_LOGICAL_DRIVES];
+ } ldList[MAX_LOGICAL_DRIVES_EXT];
} __packed;
struct MR_LD_TARGETID_LIST {
u32 size;
u32 count;
u8 pad[3];
- u8 targetId[MAX_LOGICAL_DRIVES];
+ u8 targetId[MAX_LOGICAL_DRIVES_EXT];
};
@@ -916,6 +940,15 @@ struct megasas_ctrl_info {
* HA cluster information
*/
struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:26;
+ u32 premiumFeatureMismatch:1;
+ u32 ctrlPropIncompatible:1;
+ u32 fwVersionMismatch:1;
+ u32 hwIncompatible:1;
+ u32 peerIsIncompatible:1;
+ u32 peerIsPresent:1;
+#else
u32 peerIsPresent:1;
u32 peerIsIncompatible:1;
u32 hwIncompatible:1;
@@ -923,6 +956,7 @@ struct megasas_ctrl_info {
u32 ctrlPropIncompatible:1;
u32 premiumFeatureMismatch:1;
u32 reserved:26;
+#endif
} cluster;
char clusterId[16]; /*7D4h */
@@ -933,7 +967,27 @@ struct megasas_ctrl_info {
u8 reserved; /*0x7E7*/
} iov;
- u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:25;
+ u32 supportCrashDump:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportThermalPollInterval:1;
+ u32 supportPersonalityChange:2;
+#else
+ u32 supportPersonalityChange:2;
+ u32 supportThermalPollInterval:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportCrashDump:1;
+ u32 reserved:25;
+#endif
+ } adapterOperations3;
+
+ u8 pad[0x800-0x7EC];
} __packed;
/*
@@ -942,13 +996,12 @@ struct megasas_ctrl_info {
* ===============================
*/
#define MEGASAS_MAX_PD_CHANNELS 2
-#define MEGASAS_MAX_LD_CHANNELS 1
+#define MEGASAS_MAX_LD_CHANNELS 2
#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
MEGASAS_MAX_LD_CHANNELS)
#define MEGASAS_MAX_DEV_PER_CHANNEL 128
#define MEGASAS_DEFAULT_INIT_ID -1
#define MEGASAS_MAX_LUN 8
-#define MEGASAS_MAX_LD 64
#define MEGASAS_DEFAULT_CMD_PER_LUN 256
#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
@@ -961,6 +1014,14 @@ struct megasas_ctrl_info {
#define MEGASAS_FW_BUSY 1
+#define VD_EXT_DEBUG 0
+
+enum MR_MFI_MPT_PTHR_FLAGS {
+ MFI_MPT_DETACHED = 0,
+ MFI_LIST_ADDED = 1,
+ MFI_MPT_ATTACHED = 2,
+};
+
/* Frame Type */
#define IO_FRAME 0
#define PTHRU_FRAME 1
@@ -978,7 +1039,7 @@ struct megasas_ctrl_info {
#define MEGASAS_IOCTL_CMD 0
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
-
+#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
@@ -1133,13 +1194,19 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:30;
+ u32 reserved:27;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_max_255lds:1;
+ u32 reserved1:1;
u32 support_additional_msix:1;
u32 support_fp_remote_lun:1;
#else
u32 support_fp_remote_lun:1;
u32 support_additional_msix:1;
- u32 reserved:30;
+ u32 reserved1:1;
+ u32 support_max_255lds:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 reserved:27;
#endif
} mfi_capabilities;
u32 reg;
@@ -1559,6 +1626,20 @@ struct megasas_instance {
u32 *reply_queue;
dma_addr_t reply_queue_h;
+ u32 *crash_dump_buf;
+ dma_addr_t crash_dump_h;
+ void *crash_buf[MAX_CRASH_DUMP_SIZE];
+ u32 crash_buf_pages;
+ unsigned int fw_crash_buffer_size;
+ unsigned int fw_crash_state;
+ unsigned int fw_crash_buffer_offset;
+ u32 drv_buf_index;
+ u32 drv_buf_alloc;
+ u32 crash_dump_fw_support;
+ u32 crash_dump_drv_support;
+ u32 crash_dump_app_support;
+ spinlock_t crashdump_lock;
+
struct megasas_register_set __iomem *reg_set;
u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
@@ -1577,7 +1658,7 @@ struct megasas_instance {
struct megasas_cmd **cmd_list;
struct list_head cmd_pool;
/* used to sync fire the cmd to fw */
- spinlock_t cmd_pool_lock;
+ spinlock_t mfi_pool_lock;
/* used to sync fire the cmd to fw */
spinlock_t hba_lock;
/* used to synch producer, consumer ptrs in dpc */
@@ -1606,6 +1687,7 @@ struct megasas_instance {
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
struct work_struct work_init;
+ struct work_struct crash_init;
u8 flag;
u8 unload;
@@ -1613,6 +1695,14 @@ struct megasas_instance {
u8 issuepend_done;
u8 disableOnlineCtrlReset;
u8 UnevenSpanSupport;
+
+ u8 supportmax256vd;
+ u16 fw_supported_vd_count;
+ u16 fw_supported_pd_count;
+
+ u16 drv_supported_vd_count;
+ u16 drv_supported_pd_count;
+
u8 adprecovery;
unsigned long last_time;
u32 mfiStatus;
@@ -1622,6 +1712,8 @@ struct megasas_instance {
/* Ptr to hba specific information */
void *ctrl_context;
+ u32 ctrl_context_pages;
+ struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
@@ -1633,8 +1725,6 @@ struct megasas_instance {
struct timer_list sriov_heartbeat_timer;
char skip_heartbeat_timer_del;
u8 requestorId;
- u64 initiator_sas_address;
- u64 ld_sas_address[64];
char PlasmaFW111;
char mpio;
int throttlequeuedepth;
@@ -1661,6 +1751,7 @@ struct MR_LD_VF_AFFILIATION {
/* Plasma 1.11 FW backward compatibility structures */
#define IOV_111_OFFSET 0x7CE
#define MAX_VIRTUAL_FUNCTIONS 8
+#define MR_LD_ACCESS_HIDDEN 15
struct IOV_111 {
u8 maxVFsSupported;
@@ -1754,6 +1845,11 @@ struct megasas_cmd {
struct list_head list;
struct scsi_cmnd *scmd;
+
+ void *mpt_pthr_cmd_blocked;
+ atomic_t mfi_mpt_pthr;
+ u8 is_wait_event;
+
struct megasas_instance *instance;
union {
struct {
@@ -1823,12 +1919,33 @@ u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo);
+int megasas_get_ctrl_info(struct megasas_instance *instance,
+ struct megasas_ctrl_info *ctrl_info);
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state);
+void megasas_free_host_crash_buffer(struct megasas_instance *instance);
+void megasas_fusion_crash_dump_wq(struct work_struct *work);
+
+void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd);
+int megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, int timeout);
+void __megasas_return_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+
+void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 22a04e37b70a..f6a69a3b1b3f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : 06.803.01.00-rc1
+ * Version : 06.805.06.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -89,6 +89,10 @@ module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
"before resetting adapter. Default: 180");
+int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -206,43 +210,66 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
unsigned long flags;
struct megasas_cmd *cmd = NULL;
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
if (!list_empty(&instance->cmd_pool)) {
cmd = list_entry((&instance->cmd_pool)->next,
struct megasas_cmd, list);
list_del_init(&cmd->list);
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
} else {
printk(KERN_ERR "megasas: Command pool empty!\n");
}
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
return cmd;
}
/**
- * megasas_return_cmd - Return a cmd to free command pool
+ * __megasas_return_cmd - Return a cmd to free command pool
* @instance: Adapter soft state
* @cmd: Command packet to be returned to free command pool
*/
inline void
-megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
- unsigned long flags;
-
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+ /*
+ * Don't go ahead and free the MFI frame, if the corresponding
+ * MPT frame is not freed (valid only for fusion adapters).
+ * For MFI adapters, any allocated MFI frame will already have
+ * cmd->mfi_mpt_pthr set to MFI_MPT_DETACHED.
+ */
+ if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED)
+ return;
cmd->scmd = NULL;
cmd->frame_count = 0;
+ cmd->is_wait_event = 0;
+ cmd->mpt_pthr_cmd_blocked = NULL;
+
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
(reset_devices))
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
- list_add_tail(&cmd->list, &instance->cmd_pool);
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
+ list_add(&cmd->list, (&instance->cmd_pool)->next);
+}
+
+/**
+ * megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ __megasas_return_cmd(instance, cmd);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}
@@ -921,13 +948,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
* Used to issue ioctl commands.
*/
-static int
+int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
cmd->cmd_status = ENODATA;
+ cmd->is_wait_event = 1;
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
@@ -1536,7 +1564,7 @@ out_return_cmd:
* @done: Callback entry point
*/
static int
-megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
+megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
unsigned long flags;
@@ -1558,7 +1586,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
} else {
spin_unlock_irqrestore(&instance->hba_lock, flags);
scmd->result = DID_NO_CONNECT << 16;
- done(scmd);
+ scmd->scsi_done(scmd);
return 0;
}
}
@@ -1566,7 +1594,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
scmd->result = DID_NO_CONNECT << 16;
- done(scmd);
+ scmd->scsi_done(scmd);
return 0;
}
@@ -1577,11 +1605,11 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
spin_unlock_irqrestore(&instance->hba_lock, flags);
- scmd->scsi_done = done;
scmd->result = 0;
if (MEGASAS_IS_LOGICAL(scmd) &&
- (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
+ (scmd->device->id >= instance->fw_supported_vd_count ||
+ scmd->device->lun)) {
scmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
@@ -1606,12 +1634,10 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
return 0;
out_done:
- done(scmd);
+ scmd->scsi_done(scmd);
return 0;
}
-static DEF_SCSI_QCMD(megasas_queue_command)
-
static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
int i;
@@ -1628,36 +1654,12 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
static int megasas_slave_configure(struct scsi_device *sdev)
{
- u16 pd_index = 0;
- struct megasas_instance *instance ;
-
- instance = megasas_lookup_instance(sdev->host->host_no);
-
- /*
- * Don't export physical disk devices to the disk driver.
- *
- * FIXME: Currently we don't export them to the midlayer at all.
- * That will be fixed once LSI engineers have audited the
- * firmware for possible issues.
- */
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
- sdev->type == TYPE_DISK) {
- pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
- sdev->id;
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- blk_queue_rq_timeout(sdev->request_queue,
- MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
- return 0;
- }
- return -ENXIO;
- }
-
/*
* The RAID firmware may require extended timeouts.
*/
blk_queue_rq_timeout(sdev->request_queue,
MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+
return 0;
}
@@ -1666,18 +1668,15 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
u16 pd_index = 0;
struct megasas_instance *instance ;
instance = megasas_lookup_instance(sdev->host->host_no);
- if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
- (sdev->type == TYPE_DISK)) {
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
/*
* Open the OS scan to the SYSTEM PD
*/
pd_index =
(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
- if ((instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) &&
- (instance->pd_list[pd_index].driveType ==
- TYPE_DISK)) {
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
return 0;
}
return -ENXIO;
@@ -1825,16 +1824,12 @@ void megasas_do_ocr(struct megasas_instance *instance)
process_fw_state_change_wq(&instance->work_init);
}
-/* This function will get the current SR-IOV LD/VF affiliation */
-static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
- int initial)
+static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
+ int initial)
{
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
- struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
- dma_addr_t new_affiliation_h;
dma_addr_t new_affiliation_111_h;
int ld, retval = 0;
u8 thisVf;
@@ -1842,15 +1837,15 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
- "affiliation: Failed to get cmd for scsi%d.\n",
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:"
+ "Failed to get cmd for scsi%d.\n",
instance->host->host_no);
return -ENOMEM;
}
dcmd = &cmd->frame->dcmd;
- if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+ if (!instance->vf_affiliation_111) {
printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
"affiliation for scsi%d.\n", instance->host->host_no);
megasas_return_cmd(instance, cmd);
@@ -1858,38 +1853,22 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
}
if (initial)
- if (instance->PlasmaFW111)
memset(instance->vf_affiliation_111, 0,
sizeof(struct MR_LD_VF_AFFILIATION_111));
- else
- memset(instance->vf_affiliation, 0,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION));
else {
- if (instance->PlasmaFW111)
- new_affiliation_111 =
- pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h);
- else
- new_affiliation =
- pci_alloc_consistent(instance->pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h);
- if (!new_affiliation && !new_affiliation_111) {
+ new_affiliation_111 =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
+ if (!new_affiliation_111) {
printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d.\n",
- instance->host->host_no);
+ instance->host->host_no);
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
- if (instance->PlasmaFW111)
- memset(new_affiliation_111, 0,
- sizeof(struct MR_LD_VF_AFFILIATION_111));
- else
- memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION));
+ memset(new_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -1900,34 +1879,17 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
dcmd->flags = MFI_FRAME_DIR_BOTH;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- if (instance->PlasmaFW111) {
- dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
- dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
- } else {
- dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION);
- dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
- }
+ dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
- if (initial) {
- if (instance->PlasmaFW111)
- dcmd->sgl.sge32[0].phys_addr =
- instance->vf_affiliation_111_h;
- else
- dcmd->sgl.sge32[0].phys_addr =
- instance->vf_affiliation_h;
- } else {
- if (instance->PlasmaFW111)
- dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
- else
- dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
- }
- if (instance->PlasmaFW111)
- dcmd->sgl.sge32[0].length =
- sizeof(struct MR_LD_VF_AFFILIATION_111);
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_111_h;
else
- dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+
+ dcmd->sgl.sge32[0].length =
+ sizeof(struct MR_LD_VF_AFFILIATION_111);
printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
@@ -1943,80 +1905,222 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
}
if (!initial) {
- if (instance->PlasmaFW111) {
- if (!new_affiliation_111->vdCount) {
- printk(KERN_WARNING "megasas: SR-IOV: Got new "
- "LD/VF affiliation for passive path "
+ thisVf = new_affiliation_111->thisVf;
+ for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+ if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
+ new_affiliation_111->map[ld].policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
"for scsi%d.\n",
- instance->host->host_no);
- retval = 1;
- goto out;
- }
- thisVf = new_affiliation_111->thisVf;
- for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
- if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
- printk(KERN_WARNING "megasas: SR-IOV: "
- "Got new LD/VF affiliation "
- "for scsi%d.\n",
- instance->host->host_no);
- memcpy(instance->vf_affiliation_111,
- new_affiliation_111,
- sizeof(struct MR_LD_VF_AFFILIATION_111));
- retval = 1;
- goto out;
- }
- } else {
- if (!new_affiliation->ldCount) {
- printk(KERN_WARNING "megasas: SR-IOV: Got new "
- "LD/VF affiliation for passive "
- "path for scsi%d.\n",
instance->host->host_no);
+ memcpy(instance->vf_affiliation_111,
+ new_affiliation_111,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
retval = 1;
goto out;
}
- newmap = new_affiliation->map;
- savedmap = instance->vf_affiliation->map;
- thisVf = new_affiliation->thisVf;
- for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
- if (savedmap->policy[thisVf] !=
- newmap->policy[thisVf]) {
- printk(KERN_WARNING "megasas: SR-IOV: "
- "Got new LD/VF affiliation "
- "for scsi%d.\n",
- instance->host->host_no);
- memcpy(instance->vf_affiliation,
- new_affiliation,
- new_affiliation->size);
- retval = 1;
- goto out;
+ }
+out:
+ if (new_affiliation_111) {
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ new_affiliation_111,
+ new_affiliation_111_h);
+ }
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+ struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+ dma_addr_t new_affiliation_h;
+ int i, j, retval = 0, found = 0, doscan = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ else {
+ new_affiliation =
+ pci_alloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
+ if (!new_affiliation) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+
+ dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ if (!new_affiliation->ldCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for passive path for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+ thisVf = new_affiliation->thisVf;
+ for (i = 0 ; i < new_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0; j < instance->vf_affiliation->ldCount;
+ j++) {
+ if (newmap->ref.targetId ==
+ savedmap->ref.targetId) {
+ found = 1;
+ if (newmap->policy[thisVf] !=
+ savedmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
}
savedmap = (struct MR_LD_VF_MAP *)
((unsigned char *)savedmap +
savedmap->size);
+ }
+ if (!found && newmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap + newmap->size);
+ }
+
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+
+ for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0 ; j < new_affiliation->ldCount; j++) {
+ if (savedmap->ref.targetId ==
+ newmap->ref.targetId) {
+ found = 1;
+ if (savedmap->policy[thisVf] !=
+ newmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
+ }
newmap = (struct MR_LD_VF_MAP *)
((unsigned char *)newmap +
newmap->size);
}
+ if (!found && savedmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
}
}
out:
- if (new_affiliation) {
- if (instance->PlasmaFW111)
- pci_free_consistent(instance->pdev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- new_affiliation_111,
- new_affiliation_111_h);
- else
- pci_free_consistent(instance->pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- new_affiliation, new_affiliation_h);
+ if (doscan) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ memcpy(instance->vf_affiliation, new_affiliation,
+ new_affiliation->size);
+ retval = 1;
}
- megasas_return_cmd(instance, cmd);
+
+ if (new_affiliation)
+ pci_free_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ new_affiliation, new_affiliation_h);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return retval;
}
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial)
+{
+ int retval;
+
+ if (instance->PlasmaFW111)
+ retval = megasas_get_ld_vf_affiliation_111(instance, initial);
+ else
+ retval = megasas_get_ld_vf_affiliation_12(instance, initial);
+ return retval;
+}
+
/* This function will tell FW to start the SR-IOV heartbeat */
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
int initial)
@@ -2459,7 +2563,12 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
cmd->abort_aen = 0;
instance->aen_cmd = NULL;
- megasas_return_cmd(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
if ((instance->unload == 0) &&
((instance->issuepend_done == 1))) {
@@ -2491,6 +2600,152 @@ static int megasas_change_queue_depth(struct scsi_device *sdev,
return queue_depth;
}
+static ssize_t
+megasas_fw_crash_buffer_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ instance->fw_crash_buffer_offset = val;
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_buffer_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ u32 size;
+ unsigned long buff_addr;
+ unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long src_addr;
+ unsigned long flags;
+ u32 buff_offset;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ buff_offset = instance->fw_crash_buffer_offset;
+ if (!instance->crash_dump_buf &&
+ !((instance->fw_crash_state == AVAILABLE) ||
+ (instance->fw_crash_state == COPYING))) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump is not available\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return -EINVAL;
+ }
+
+ buff_addr = (unsigned long) buf;
+
+ if (buff_offset >
+ (instance->fw_crash_buffer_size * dmachunk)) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump offset is out of range\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return 0;
+ }
+
+ size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+ src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+ (buff_offset % dmachunk);
+ memcpy(buf, (void *)src_addr, size);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+
+ return size;
+}
+
+static ssize_t
+megasas_fw_crash_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
+ ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
+}
+
+static ssize_t
+megasas_fw_crash_state_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ if ((val <= AVAILABLE || val > COPY_ERROR)) {
+ dev_err(&instance->pdev->dev, "application updates invalid "
+ "firmware crash state\n");
+ return -EINVAL;
+ }
+
+ instance->fw_crash_state = val;
+
+ if ((val == COPIED) || (val == COPY_ERROR)) {
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ megasas_free_host_crash_buffer(instance);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ if (val == COPY_ERROR)
+ dev_info(&instance->pdev->dev, "application failed to "
+ "copy Firmware crash dump\n");
+ else
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "copied successfully\n");
+ }
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_state_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
+}
+
+static ssize_t
+megasas_page_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
+}
+
+static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
+static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
+ megasas_fw_crash_buffer_size_show, NULL);
+static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_state_show, megasas_fw_crash_state_store);
+static DEVICE_ATTR(page_size, S_IRUGO,
+ megasas_page_size_show, NULL);
+
+struct device_attribute *megaraid_host_attrs[] = {
+ &dev_attr_fw_crash_buffer_size,
+ &dev_attr_fw_crash_buffer,
+ &dev_attr_fw_crash_state,
+ &dev_attr_page_size,
+ NULL,
+};
+
/*
* Scsi host template for megaraid_sas driver
*/
@@ -2506,6 +2761,7 @@ static struct scsi_host_template megasas_template = {
.eh_bus_reset_handler = megasas_reset_bus_host,
.eh_host_reset_handler = megasas_reset_bus_host,
.eh_timed_out = megasas_reset_timer,
+ .shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
.use_clustering = ENABLE_CLUSTERING,
.change_queue_depth = megasas_change_queue_depth,
@@ -2688,7 +2944,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
"failed, status = 0x%x.\n",
cmd->frame->hdr.cmd_status);
else {
- megasas_return_cmd(instance, cmd);
+ megasas_return_mfi_mpt_pthr(instance,
+ cmd, cmd->mpt_pthr_cmd_blocked);
spin_unlock_irqrestore(
instance->host->host_lock,
flags);
@@ -2696,7 +2953,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
}
} else
instance->map_id++;
- megasas_return_cmd(instance, cmd);
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
/*
* Set fast path IO to ZERO.
@@ -2852,7 +3110,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
unsigned long flags;
defer_index = 0;
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
if (cmd->sync_cmd == 1 || cmd->scmd) {
@@ -2873,7 +3131,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
&instance->internal_reset_pending_q);
}
}
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}
@@ -3438,7 +3696,9 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
int j;
u32 max_cmd;
struct megasas_cmd *cmd;
+ struct fusion_context *fusion;
+ fusion = instance->ctrl_context;
max_cmd = instance->max_mfi_cmds;
/*
@@ -3471,13 +3731,11 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
}
}
- /*
- * Add all the commands to command pool (instance->cmd_pool)
- */
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
memset(cmd, 0, sizeof(struct megasas_cmd));
cmd->index = i;
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
cmd->scmd = NULL;
cmd->instance = instance;
@@ -3548,11 +3806,11 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
- if (!megasas_issue_polled(instance, cmd)) {
- ret = 0;
- } else {
- ret = -1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
/*
* the following function will get the instance PD LIST.
@@ -3584,7 +3842,12 @@ megasas_get_pd_list(struct megasas_instance *instance)
pci_free_consistent(instance->pdev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -3630,6 +3893,8 @@ megasas_get_ld_list(struct megasas_instance *instance)
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[0] = 1;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -3641,18 +3906,19 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
- if (!megasas_issue_polled(instance, cmd)) {
- ret = 0;
- } else {
- ret = -1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
ld_count = le32_to_cpu(ci->ldCount);
/* the following function will get the instance PD LIST */
- if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
- memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
+ memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_count; ld_index++) {
if (ci->ldList[ld_index].state != 0) {
@@ -3668,7 +3934,11 @@ megasas_get_ld_list(struct megasas_instance *instance)
ci,
ci_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -3715,6 +3985,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->mbox.b[0] = query_type;
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[2] = 1;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
@@ -3727,16 +3999,15 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->pad_0 = 0;
- if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
- ret = 0;
- } else {
- /* On failure, call older LD list DCMD */
- ret = 1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
tgtid_count = le32_to_cpu(ci->count);
- if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
+ if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
ids = ci->targetId[ld_index];
@@ -3748,7 +4019,11 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -3762,7 +4037,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
* This information is mainly used to find out the maximum IO transfer per
* command supported by the FW.
*/
-static int
+int
megasas_get_ctrl_info(struct megasas_instance *instance,
struct megasas_ctrl_info *ctrl_info)
{
@@ -3803,18 +4078,84 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->mbox.b[0] = 1;
- if (!megasas_issue_polled(instance, cmd)) {
- ret = 0;
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (!ret)
memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
- } else {
- ret = -1;
- }
pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/*
+ * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
+ * to firmware
+ *
+ * @instance: Adapter soft state
+ * @crash_buf_state - tell FW to turn ON/OFF crash dump feature
+ *                    MR_CRASH_BUF_TURN_OFF = 0
+ *                    MR_CRASH_BUF_TURN_ON = 1
+ * @return 0 on success, non-zero on failure.
+ * Issues an internal command (DCMD) to set parameters for the crash dump
+ * feature. The driver sends the address of the crash dump DMA buffer and sets
+ * the mailbox to tell the FW that the driver supports the crash dump feature.
+ * This DCMD is sent only if the crash dump feature is supported by the FW.
+ *
+ */
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd->mbox.b[0] = crash_buf_state;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -3948,6 +4289,13 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
if (megasas_issue_init_mfi(instance))
goto fail_fw_init;
+ if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
+ dev_err(&instance->pdev->dev, "(%d): Could get controller info "
+ "Fail from %s %d\n", instance->unique_id,
+ __func__, __LINE__);
+ goto fail_fw_init;
+ }
+
instance->fw_support_ieee = 0;
instance->fw_support_ieee =
(instance->instancet->read_fw_status_reg(reg_set) &
@@ -3986,7 +4334,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
u32 tmp_sectors, msix_enable, scratch_pad_2;
resource_size_t base_addr;
struct megasas_register_set __iomem *reg_set;
- struct megasas_ctrl_info *ctrl_info;
+ struct megasas_ctrl_info *ctrl_info = NULL;
unsigned long bar_list;
int i, loop, fw_msix_count = 0;
struct IOV_111 *iovPtr;
@@ -4103,17 +4451,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
(unsigned int)num_online_cpus());
for (i = 0; i < instance->msix_vectors; i++)
instance->msixentry[i].entry = i;
- i = pci_enable_msix(instance->pdev, instance->msixentry,
- instance->msix_vectors);
- if (i >= 0) {
- if (i) {
- if (!pci_enable_msix(instance->pdev,
- instance->msixentry, i))
- instance->msix_vectors = i;
- else
- instance->msix_vectors = 0;
- }
- } else
+ i = pci_enable_msix_range(instance->pdev, instance->msixentry,
+ 1, instance->msix_vectors);
+ if (i)
+ instance->msix_vectors = i;
+ else
instance->msix_vectors = 0;
dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
@@ -4123,6 +4465,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msix_vectors);
}
+ instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
+ GFP_KERNEL);
+ if (instance->ctrl_info == NULL)
+ goto fail_init_adapter;
+
+ /*
+ * Below are default value for legacy Firmware.
+ * non-fusion based controllers
+ */
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
@@ -4145,8 +4498,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
megasas_get_ld_list(instance);
- ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
-
/*
* Compute the max allowed sectors per IO: The controller info has two
* limits on max sectors. Driver should use the minimum of these two.
@@ -4157,58 +4508,79 @@ static int megasas_init_fw(struct megasas_instance *instance)
* to calculate max_sectors_1. So the number ended up as zero always.
*/
tmp_sectors = 0;
- if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
+ ctrl_info = instance->ctrl_info;
- max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
- le16_to_cpu(ctrl_info->max_strips_per_io);
- max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ le16_to_cpu(ctrl_info->max_strips_per_io);
+ max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
- tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+ tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
- /*Check whether controller is iMR or MR */
- if (ctrl_info->memory_size) {
- instance->is_imr = 0;
- dev_info(&instance->pdev->dev, "Controller type: MR,"
- "Memory size is: %dMB\n",
- le16_to_cpu(ctrl_info->memory_size));
- } else {
- instance->is_imr = 1;
- dev_info(&instance->pdev->dev,
- "Controller type: iMR\n");
- }
- /* OnOffProperties are converted into CPU arch*/
- le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
- instance->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
- /* adapterOperations2 are converted into CPU arch*/
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
- instance->mpio = ctrl_info->adapterOperations2.mpio;
- instance->UnevenSpanSupport =
- ctrl_info->adapterOperations2.supportUnevenSpans;
- if (instance->UnevenSpanSupport) {
- struct fusion_context *fusion = instance->ctrl_context;
- dev_info(&instance->pdev->dev, "FW supports: "
- "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
- if (MR_ValidateMapInfo(instance))
- fusion->fast_path_io = 1;
- else
- fusion->fast_path_io = 0;
+ /*Check whether controller is iMR or MR */
+ if (ctrl_info->memory_size) {
+ instance->is_imr = 0;
+ dev_info(&instance->pdev->dev, "Controller type: MR,"
+ "Memory size is: %dMB\n",
+ le16_to_cpu(ctrl_info->memory_size));
+ } else {
+ instance->is_imr = 1;
+ dev_info(&instance->pdev->dev,
+ "Controller type: iMR\n");
+ }
+ /* OnOffProperties are converted into CPU arch*/
+ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
+ instance->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ /* adapterOperations2 are converted into CPU arch*/
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+ instance->mpio = ctrl_info->adapterOperations2.mpio;
+ instance->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if (instance->UnevenSpanSupport) {
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ dev_info(&instance->pdev->dev, "FW supports: "
+ "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+ }
+ if (ctrl_info->host_interface.SRIOV) {
+ if (!ctrl_info->adapterOperations2.activePassive)
+ instance->PlasmaFW111 = 1;
+
+ if (!instance->PlasmaFW111)
+ instance->requestorId =
+ ctrl_info->iov.requestorId;
+ else {
+ iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+ instance->requestorId = iovPtr->requestorId;
}
- if (ctrl_info->host_interface.SRIOV) {
- if (!ctrl_info->adapterOperations2.activePassive)
- instance->PlasmaFW111 = 1;
-
- if (!instance->PlasmaFW111)
- instance->requestorId =
- ctrl_info->iov.requestorId;
- else {
- iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
- instance->requestorId = iovPtr->requestorId;
- }
- printk(KERN_WARNING "megaraid_sas: I am VF "
- "requestorId %d\n", instance->requestorId);
- }
+ dev_warn(&instance->pdev->dev, "I am VF "
+ "requestorId %d\n", instance->requestorId);
+ }
+
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+ instance->crash_dump_fw_support =
+ ctrl_info->adapterOperations3.supportCrashDump;
+ instance->crash_dump_drv_support =
+ (instance->crash_dump_fw_support &&
+ instance->crash_dump_buf);
+ if (instance->crash_dump_drv_support) {
+ dev_info(&instance->pdev->dev, "Firmware Crash dump "
+ "feature is supported\n");
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+
+ } else {
+ if (instance->crash_dump_buf)
+ pci_free_consistent(instance->pdev,
+ CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+ instance->crash_dump_buf = NULL;
}
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
@@ -4256,6 +4628,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
fail_init_adapter:
fail_ready_state:
+ kfree(instance->ctrl_info);
+ instance->ctrl_info = NULL;
iounmap(instance->reg_set);
fail_ioremap:
@@ -4351,7 +4725,11 @@ megasas_get_seq_num(struct megasas_instance *instance,
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return 0;
}
@@ -4634,6 +5012,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
struct Scsi_Host *host;
struct megasas_instance *instance;
u16 control = 0;
+ struct fusion_context *fusion = NULL;
/* Reset MSI-X in the kdump kernel */
if (reset_devices) {
@@ -4694,10 +5073,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
{
- struct fusion_context *fusion;
-
- instance->ctrl_context =
- kzalloc(sizeof(struct fusion_context), GFP_KERNEL);
+ instance->ctrl_context_pages =
+ get_order(sizeof(struct fusion_context));
+ instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
+ instance->ctrl_context_pages);
if (!instance->ctrl_context) {
printk(KERN_DEBUG "megasas: Failed to allocate "
"memory for Fusion context info\n");
@@ -4705,7 +5084,9 @@ static int megasas_probe_one(struct pci_dev *pdev,
}
fusion = instance->ctrl_context;
INIT_LIST_HEAD(&fusion->cmd_pool);
- spin_lock_init(&fusion->cmd_pool_lock);
+ spin_lock_init(&fusion->mpt_pool_lock);
+ memset(fusion->load_balance_info, 0,
+ sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
}
break;
default: /* For all other supported controllers */
@@ -4728,13 +5109,29 @@ static int megasas_probe_one(struct pci_dev *pdev,
break;
}
+ /* Crash dump feature related initialisation*/
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->crash_dump_fw_support = 0;
+ instance->crash_dump_app_support = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ spin_lock_init(&instance->crashdump_lock);
+ instance->crash_dump_buf = NULL;
+
+ if (!reset_devices)
+ instance->crash_dump_buf = pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
+ if (!instance->crash_dump_buf)
+ dev_err(&instance->pdev->dev, "Can't allocate Firmware "
+ "crash dump DMA buffer\n");
+
megasas_poll_wait_aen = 0;
instance->flag_ieee = 0;
instance->ev = NULL;
instance->issuepend_done = 1;
instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
instance->is_imr = 0;
- megasas_poll_wait_aen = 0;
instance->evt_detail = pci_alloc_consistent(pdev,
sizeof(struct
@@ -4758,7 +5155,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
- spin_lock_init(&instance->cmd_pool_lock);
+ spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->completion_lock);
@@ -4771,13 +5168,14 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->host = host;
instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+ instance->ctrl_info = NULL;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
instance->flag_ieee = 1;
sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
} else
- sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
+ sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
megasas_dbg_lvl = 0;
instance->flag = 0;
@@ -4789,9 +5187,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
- else
+ INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
+ } else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
/*
@@ -4836,8 +5235,9 @@ retry_irq_register:
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
for (j = 0; j < i; j++) {
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
@@ -4846,11 +5246,14 @@ retry_irq_register:
instance->msix_vectors = 0;
goto retry_irq_register;
}
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev, "Error setting"
- "affinity hint for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev,
+ "Error setting affinity hint "
+ "for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
}
} else {
instance->irq_context[0].instance = instance;
@@ -4894,6 +5297,10 @@ retry_irq_register:
goto fail_start_aen;
}
+ /* Get current SR-IOV LD/VF affiliation */
+ if (instance->requestorId)
+ megasas_get_ld_vf_affiliation(instance, 1);
+
return 0;
fail_start_aen:
@@ -4905,8 +5312,9 @@ retry_irq_register:
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -4979,7 +5387,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dev_err(&instance->pdev->dev, "Command timedout"
" from %s\n", __func__);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return;
}
@@ -5026,7 +5438,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dev_err(&instance->pdev->dev, "Command timedout"
"from %s\n", __func__);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return;
}
@@ -5069,8 +5485,9 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -5132,9 +5549,10 @@ megasas_resume(struct pci_dev *pdev)
goto fail_ready_state;
/* Now re-enable MSI-X */
- if (instance->msix_vectors)
- pci_enable_msix(instance->pdev, instance->msixentry,
- instance->msix_vectors);
+ if (instance->msix_vectors &&
+ pci_enable_msix_exact(instance->pdev, instance->msixentry,
+ instance->msix_vectors))
+ goto fail_reenable_msix;
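[Illustrative note, not part of the patch: pci_enable_msix_exact() is the all-or-nothing variant, equivalent to pci_enable_msix_range() with minvec == maxvec, returning 0 on success or a negative errno; that is why any non-zero return here goes straight to the new error label. A hedged sketch of its behaviour (helper name is hypothetical, the real wrapper lives in <linux/pci.h>):]

	/* Sketch: request exactly nvec vectors or fail. */
	static inline int msix_exact_sketch(struct pci_dev *dev,
					    struct msix_entry *entries, int nvec)
	{
		int rc = pci_enable_msix_range(dev, entries, nvec, nvec);

		return rc < 0 ? rc : 0;	/* success always means nvec vectors */
	}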
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
@@ -5178,8 +5596,9 @@ megasas_resume(struct pci_dev *pdev)
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
for (j = 0; j < i; j++) {
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
@@ -5187,11 +5606,14 @@ megasas_resume(struct pci_dev *pdev)
goto fail_irq;
}
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev, "Error setting"
- "affinity hint for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error "
+ "setting affinity hint for cpu "
+ "%d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
}
} else {
instance->irq_context[0].instance = instance;
@@ -5243,6 +5665,7 @@ fail_init_mfi:
fail_set_dma_mask:
fail_ready_state:
+fail_reenable_msix:
pci_disable_device(pdev);
@@ -5273,6 +5696,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
+ if (instance->fw_crash_state != UNAVAILABLE)
+ megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -5306,8 +5731,9 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -5322,14 +5748,18 @@ static void megasas_detach_one(struct pci_dev *pdev)
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
megasas_release_fusion(instance);
- for (i = 0; i < 2 ; i++)
+ for (i = 0; i < 2 ; i++) {
if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
- fusion->map_sz,
+ fusion->max_map_sz,
fusion->ld_map[i],
- fusion->
- ld_map_phys[i]);
- kfree(instance->ctrl_context);
+ fusion->ld_map_phys[i]);
+ if (fusion->ld_drv_map[i])
+ free_pages((ulong)fusion->ld_drv_map[i],
+ fusion->drv_map_pages);
+ }
+ free_pages((ulong)instance->ctrl_context,
+ instance->ctrl_context_pages);
break;
default:
megasas_release_mfi(instance);
@@ -5342,6 +5772,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
break;
}
+ kfree(instance->ctrl_info);
+
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail, instance->evt_detail_h);
@@ -5363,6 +5795,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->hb_host_mem,
instance->hb_host_mem_h);
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf, instance->crash_dump_h);
+
scsi_host_put(host);
pci_disable_device(pdev);
@@ -5385,8 +5821,9 @@ static void megasas_shutdown(struct pci_dev *pdev)
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -5448,12 +5885,53 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
spin_lock_irqsave(&poll_aen_lock, flags);
if (megasas_poll_wait_aen)
mask = (POLLIN | POLLRDNORM);
+
else
mask = 0;
+ megasas_poll_wait_aen = 0;
spin_unlock_irqrestore(&poll_aen_lock, flags);
return mask;
}
+/*
+ * megasas_set_crash_dump_params_ioctl:
+ * Send CRASH_DUMP_MODE DCMD to all controllers
+ * @cmd: MFI command frame
+ */
+
+static int megasas_set_crash_dump_params_ioctl(
+ struct megasas_cmd *cmd)
+{
+ struct megasas_instance *local_instance;
+ int i, error = 0;
+ int crash_support;
+
+ crash_support = cmd->frame->dcmd.mbox.w[0];
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ local_instance = megasas_mgmt_info.instance[i];
+ if (local_instance && local_instance->crash_dump_drv_support) {
+ if ((local_instance->adprecovery ==
+ MEGASAS_HBA_OPERATIONAL) &&
+ !megasas_set_crash_dump_params(local_instance,
+ crash_support)) {
+ local_instance->crash_dump_app_support =
+ crash_support;
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+ "dump mode set success\n");
+ error = 0;
+ } else {
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+ "dump mode set failed\n");
+ error = -1;
+ }
+ }
+ }
+ return error;
+}
+
/**
* megasas_mgmt_fw_ioctl - Issues management ioctls to FW
* @instance: Adapter soft state
@@ -5500,6 +5978,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
+ if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+ error = megasas_set_crash_dump_params_ioctl(cmd);
+ megasas_return_cmd(instance, cmd);
+ return error;
+ }
+
/*
* The management interface between applications and the fw uses
* MFI frames. E.g, RAID configuration changes, LD property changes
@@ -5619,9 +6103,14 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i],
le32_to_cpu(kern_sge32[i].phys_addr));
+ kbuff_arr[i] = NULL;
}
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return error;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 081bfff12d00..685e6f391fe4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -55,6 +55,13 @@
#include "megaraid_sas.h"
#include <asm/div64.h>
+#define LB_PENDING_CMDS_DEFAULT 4
+static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+module_param(lb_pending_cmds, int, S_IRUGO);
+MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
+	"threshold. Valid values are 1-128. Default: 4");
+
+
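[Illustrative note, not part of the patch: module_param() does no range checking of its own, so the driver is expected to validate lb_pending_cmds before consuming it; the check appears further down in mr_update_load_balance_params(). A minimal sketch of that defensive clamp:]

	/* Sketch: fold an out-of-range module parameter back to its default
	 * before the load-balancing code uses it as a skew threshold.
	 */
	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;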
#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3
#define FALSE 0
@@ -66,16 +73,13 @@
#define SPAN_INVALID 0xff
/* Prototypes */
-void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
- struct LD_LOAD_BALANCE_INFO *lbInfo);
-
-static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
- struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
+ struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
- u64 strip, struct MR_FW_RAID_MAP_ALL *map);
+ u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
u32 mega_mod64(u64 dividend, u32 divisor)
{
@@ -109,94 +113,183 @@ u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
return d;
}
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
return &map->raidMap.ldSpanMap[ld].ldRaid;
}
static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
- struct MR_FW_RAID_MAP_ALL
+ struct MR_DRV_RAID_MAP_ALL
*map)
{
return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}
-static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}
-u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}
-u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
{
return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}
-u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.devHndlInfo[pd].curDevHdl;
}
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldTgtIdToLd[ldTgtId];
}
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
/*
+ * This function will Populate Driver Map using firmware raid map
+ */
+void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+ struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
+ int i;
+
+
+ struct MR_DRV_RAID_MAP_ALL *drv_map =
+ fusion->ld_drv_map[(instance->map_id & 1)];
+ struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+ if (instance->supportmax256vd) {
+ memcpy(fusion->ld_drv_map[instance->map_id & 1],
+ fusion->ld_map[instance->map_id & 1],
+ fusion->current_map_sz);
+ /* New Raid map will not set totalSize, so keep expected value
+ * for legacy code in ValidateMapInfo
+ */
+ pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT);
+ } else {
+ fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+ fusion->ld_map[(instance->map_id & 1)];
+ pFwRaidMap = &fw_map_old->raidMap;
+
+#if VD_EXT_DEBUG
+ for (i = 0; i < pFwRaidMap->ldCount; i++) {
+ dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
+			"Target Id 0x%x Seq Num 0x%x Size 0x%llx\n",
+ instance->unique_id, i,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
+ }
+#endif
+
+ memset(drv_map, 0, fusion->drv_map_sz);
+ pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+ pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
+ pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+ for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u8)pFwRaidMap->ldTgtIdToLd[i];
+ for (i = 0; i < pDrvRaidMap->ldCount; i++) {
+ pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev,
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
+ "raid map %p LD RAID MAP %p/%p\n", drv_map,
+ pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
+ &pDrvRaidMap->ldSpanMap[i].ldRaid);
+#endif
+ }
+ memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+ sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+ memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
+ }
+}
+
+/*
* This function will validate Map info data provided by FW
*/
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
- struct fusion_context *fusion = instance->ctrl_context;
- struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
- struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
- PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
- struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ struct MR_DRV_RAID_MAP *pDrvRaidMap;
+ struct LD_LOAD_BALANCE_INFO *lbInfo;
+ PLD_SPAN_INFO ldSpanInfo;
struct MR_LD_RAID *raid;
int ldCount, num_lds;
u16 ld;
+ u32 expected_size;
+
+
+ MR_PopulateDrvRaidMap(instance);
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ pDrvRaidMap = &drv_map->raidMap;
+ lbInfo = fusion->load_balance_info;
+ ldSpanInfo = fusion->log_to_span;
- if (le32_to_cpu(pFwRaidMap->totalSize) !=
- (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
- printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
- (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
- sizeof(struct MR_LD_SPAN_MAP)) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- le32_to_cpu(pFwRaidMap->ldCount))));
- printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
- ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
- le32_to_cpu(pFwRaidMap->totalSize));
+ if (instance->supportmax256vd)
+ expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
+ else
+ expected_size =
+ (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount)));
+
+ if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
+		dev_err(&instance->pdev->dev, "map info structure size 0x%x does not match ld count\n",
+			(unsigned int) expected_size);
+		dev_err(&instance->pdev->dev, "span map %x, pDrvRaidMap->totalSize : %x\n",
+ (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+ le32_to_cpu(pDrvRaidMap->totalSize));
return 0;
}
if (instance->UnevenSpanSupport)
- mr_update_span_set(map, ldSpanInfo);
+ mr_update_span_set(drv_map, ldSpanInfo);
- mr_update_load_balance_params(map, lbInfo);
+ mr_update_load_balance_params(drv_map, lbInfo);
- num_lds = le32_to_cpu(map->raidMap.ldCount);
+ num_lds = le32_to_cpu(drv_map->raidMap.ldCount);
/*Convert Raid capability values to CPU arch */
for (ldCount = 0; ldCount < num_lds; ldCount++) {
- ld = MR_TargetIdToLdGet(ldCount, map);
- raid = MR_LdRaidGet(ld, map);
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
le32_to_cpus((u32 *)&raid->capability);
}
@@ -204,7 +297,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
}
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
struct MR_QUAD_ELEMENT *quad;
@@ -246,7 +339,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
* ldSpanInfo - ldSpanInfo per HBA instance
*/
#if SPAN_DEBUG
-static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
{
u8 span;
@@ -257,9 +351,9 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
int ldCount;
u16 ld;
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES)
+ if (ld >= MAX_LOGICAL_DRIVES_EXT)
continue;
raid = MR_LdRaidGet(ld, map);
dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
@@ -339,7 +433,7 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
*/
u32 mr_spanset_get_span_block(struct megasas_instance *instance,
- u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -402,7 +496,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
*/
static u64 get_row_from_strip(struct megasas_instance *instance,
- u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -471,7 +565,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
*/
static u64 get_strip_from_row(struct megasas_instance *instance,
- u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -532,7 +626,7 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
*/
static u32 get_arm_from_strip(struct megasas_instance *instance,
- u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -580,7 +674,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
/* This Function will return Phys arm */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
/* Need to check correct default value */
@@ -624,7 +718,7 @@ u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
u32 pd, arRef;
@@ -682,6 +776,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
return retval;
}
@@ -705,7 +800,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
u32 pd, arRef;
@@ -778,6 +873,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
return retval;
}
@@ -794,7 +890,7 @@ u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
struct MR_LD_RAID *raid;
u32 ld, stripSize, stripe_mask;
@@ -1043,8 +1139,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
* ldSpanInfo - ldSpanInfo per HBA instance
*
*/
-void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
- PLD_SPAN_INFO ldSpanInfo)
+void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
{
u8 span, count;
u32 element, span_row_width;
@@ -1056,9 +1152,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
u16 ld;
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES)
+ if (ld >= MAX_LOGICAL_DRIVES_EXT)
continue;
raid = MR_LdRaidGet(ld, map);
for (element = 0; element < MAX_QUAD_DEPTH; element++) {
@@ -1152,90 +1248,105 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
}
-void
-mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
- struct LD_LOAD_BALANCE_INFO *lbInfo)
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo)
{
int ldCount;
u16 ld;
struct MR_LD_RAID *raid;
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
- ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES) {
+ if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
+ lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ if (ld >= MAX_LOGICAL_DRIVES_EXT) {
lbInfo[ldCount].loadBalanceFlag = 0;
continue;
}
- raid = MR_LdRaidGet(ld, map);
-
- /* Two drive Optimal RAID 1 */
- if ((raid->level == 1) && (raid->rowSize == 2) &&
- (raid->spanDepth == 1) && raid->ldState ==
- MR_LD_STATE_OPTIMAL) {
- u32 pd, arRef;
-
- lbInfo[ldCount].loadBalanceFlag = 1;
-
- /* Get the array on which this span is present */
- arRef = MR_LdSpanArrayGet(ld, 0, map);
-
- /* Get the Pd */
- pd = MR_ArPdGet(arRef, 0, map);
- /* Get dev handle from Pd */
- lbInfo[ldCount].raid1DevHandle[0] =
- MR_PdDevHandleGet(pd, map);
- /* Get the Pd */
- pd = MR_ArPdGet(arRef, 1, map);
-
- /* Get the dev handle from Pd */
- lbInfo[ldCount].raid1DevHandle[1] =
- MR_PdDevHandleGet(pd, map);
- } else
+ raid = MR_LdRaidGet(ld, drv_map);
+ if ((raid->level != 1) ||
+ (raid->ldState != MR_LD_STATE_OPTIMAL)) {
lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+ lbInfo[ldCount].loadBalanceFlag = 1;
}
}
-u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
- u32 count)
+u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
- u16 pend0, pend1;
+ struct fusion_context *fusion;
+ struct MR_LD_RAID *raid;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ u16 pend0, pend1, ld;
u64 diff0, diff1;
- u8 bestArm;
+ u8 bestArm, pd0, pd1, span, arm;
+ u32 arRef, span_row_size;
+
+ u64 block = io_info->ldStartBlock;
+ u32 count = io_info->numBlocks;
+
+ span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
+ >> RAID_CTX_SPANARM_SPAN_SHIFT);
+ arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
+
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
+ span_row_size = instance->UnevenSpanSupport ?
+ SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
+
+ arRef = MR_LdSpanArrayGet(ld, span, drv_map);
+ pd0 = MR_ArPdGet(arRef, arm, drv_map);
+ pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
+ (arm + 1 - span_row_size) : arm + 1, drv_map);
/* get the pending cmds for the data and mirror arms */
- pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
- pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
/* Determine the disk whose head is nearer to the req. block */
- diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
- diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
- bestArm = (diff0 <= diff1 ? 0 : 1);
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+ bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
- /*Make balance count from 16 to 4 to keep driver in sync with Firmware*/
- if ((bestArm == arm && pend0 > pend1 + 4) ||
- (bestArm != arm && pend1 > pend0 + 4))
+ if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
+ (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
bestArm ^= 1;
/* Update the last accessed block on the correct pd */
- lbInfo->last_accessed_block[bestArm] = block + count - 1;
-
- return bestArm;
+ io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+ lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
+ io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+#if SPAN_DEBUG
+ if (arm != bestArm)
+ dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
+ "occur - span 0x%x arm 0x%x bestArm 0x%x "
+ "io_info->span_arm 0x%x\n",
+ span, arm, bestArm, io_info->span_arm);
+#endif
+ return io_info->pd_after_lb;
}
-u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
- struct IO_REQUEST_INFO *io_info)
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
- u8 arm, old_arm;
+ u8 arm_pd;
u16 devHandle;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
- old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
-
- /* get best new arm */
- arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
- io_info->numBlocks);
- devHandle = lbInfo->raid1DevHandle[arm];
- atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ /* get best new arm (PD ID) */
+ arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info);
+ devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+ atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
return devHandle;
}
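[Illustrative note, not part of the patch: the arm selection above combines two signals - which mirror arm's head is closer to the requested block, and the per-PD outstanding-command counts, with lb_pending_cmds as the tolerated skew before switching arms. A self-contained sketch of that heuristic; the function and parameter names are hypothetical, and the real code additionally maps arms to PD indices through the RAID map:]

	/* Sketch: prefer the arm whose last accessed block is nearer, but
	 * move to the other arm if the preferred one is already ahead by
	 * more than 'skew' outstanding commands.
	 */
	static unsigned int pick_mirror_arm(unsigned long long block,
					    unsigned long long last_blk[2],
					    unsigned int pending[2],
					    unsigned int skew)
	{
		unsigned long long d0 = block > last_blk[0] ?
					block - last_blk[0] : last_blk[0] - block;
		unsigned long long d1 = block > last_blk[1] ?
					block - last_blk[1] : last_blk[1] - block;
		unsigned int best = (d0 <= d1) ? 0 : 1;

		if (pending[best] > pending[best ^ 1] + skew)
			best ^= 1;	/* other arm is sufficiently less busy */

		return best;
	}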
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 3ed03dfab76c..f37eed682c75 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -50,6 +50,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
@@ -76,8 +77,6 @@ megasas_issue_polled(struct megasas_instance *instance,
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
-u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
- struct IO_REQUEST_INFO *in_info);
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);
@@ -91,6 +90,8 @@ void megasas_start_timer(struct megasas_instance *instance,
extern struct megasas_mgmt_info megasas_mgmt_info;
extern int resetwaittime;
+
+
/**
* megasas_enable_intr_fusion - Enables interrupts
* @regs: MFI register set
@@ -163,7 +164,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
(struct fusion_context *)instance->ctrl_context;
struct megasas_cmd_fusion *cmd = NULL;
- spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
if (!list_empty(&fusion->cmd_pool)) {
cmd = list_entry((&fusion->cmd_pool)->next,
@@ -173,7 +174,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
}
- spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
return cmd;
}
@@ -182,21 +183,47 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
* @instance: Adapter soft state
* @cmd: Command packet to be returned to free command pool
*/
-static inline void
-megasas_return_cmd_fusion(struct megasas_instance *instance,
- struct megasas_cmd_fusion *cmd)
+inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd)
{
unsigned long flags;
struct fusion_context *fusion =
(struct fusion_context *)instance->ctrl_context;
- spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
cmd->scmd = NULL;
cmd->sync_cmd_idx = (u32)ULONG_MAX;
- list_add_tail(&cmd->list, &fusion->cmd_pool);
+ list_add(&cmd->list, (&fusion->cmd_pool)->next);
- spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
+}
+
+/**
+ * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool
+ * @instance: Adapter soft state
+ * @cmd_mfi: MFI Command packet to be returned to free command pool
+ * @cmd_fusion:		MPT Command packet to be returned to free command pool
+ */
+inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi,
+ struct megasas_cmd_fusion *cmd_fusion)
+{
+ unsigned long flags;
+
+ /*
+ * TO DO: optimize this code and use only one lock instead of two
+ * locks being used currently- mpt_pool_lock is acquired
+ * inside mfi_pool_lock
+ */
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
+ dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
+ __func__, __LINE__);
+ atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
+ __megasas_return_cmd(instance, cmd_mfi);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}
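[Illustrative note, not part of the patch: the TO DO above is about lock nesting - mpt_pool_lock is always taken inside mfi_pool_lock on this path. A generic sketch, with hypothetical names, of why keeping one fixed acquisition order is what makes the nesting deadlock-free:]

	/* Sketch: always take the outer (mfi) lock before the inner (mpt)
	 * lock; if every path honours the same order, two CPUs can never
	 * each hold one lock while waiting for the other.
	 */
	static void return_both_sketch(spinlock_t *outer, spinlock_t *inner)
	{
		unsigned long flags;

		spin_lock_irqsave(outer, flags);	/* mfi_pool_lock */
		spin_lock(inner);			/* mpt_pool_lock, nested */
		/* ... return the MPT frame, then the MFI frame ... */
		spin_unlock(inner);
		spin_unlock_irqrestore(outer, flags);
	}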
/**
@@ -562,9 +589,11 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
{
int i;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
+ struct fusion_context *fusion;
u32 msecs = seconds * 1000;
+ fusion = instance->ctrl_context;
/*
* Wait for cmd_status to change
*/
@@ -573,8 +602,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
msleep(20);
}
- if (frame_hdr->cmd_status == 0xff)
+ if (frame_hdr->cmd_status == 0xff) {
+ if (fusion)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
return -ETIME;
+ }
return 0;
}
@@ -650,6 +683,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
/* driver supports HA / Remote LUN over Fast Path interface */
init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
= 1;
+ init_frame->driver_operations.mfi_capabilities.support_max_255lds
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
+ = 1;
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -709,6 +746,13 @@ fail_get_cmd:
* Issues an internal command (DCMD) to get the FW's controller PD
* list structure. This information is mainly used to find out SYSTEM
* supported by the FW.
+ * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
+ * dcmd.mbox.b[0] - number of LDs being sync'd
+ * dcmd.mbox.b[1] - 0 - complete command immediately.
+ * - 1 - pend till config change
+ * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
+ * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
+ * uses extended struct MR_FW_RAID_MAP_EXT
*/
static int
megasas_get_ld_map_info(struct megasas_instance *instance)
@@ -716,7 +760,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
int ret = 0;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- struct MR_FW_RAID_MAP_ALL *ci;
+ void *ci;
dma_addr_t ci_h = 0;
u32 size_map_info;
struct fusion_context *fusion;
@@ -737,10 +781,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- size_map_info = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+ size_map_info = fusion->current_map_sz;
- ci = fusion->ld_map[(instance->map_id & 1)];
+ ci = (void *) fusion->ld_map[(instance->map_id & 1)];
ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
if (!ci) {
@@ -749,9 +792,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
return -ENOMEM;
}
- memset(ci, 0, sizeof(*ci));
+ memset(ci, 0, fusion->max_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
+ __func__, cpu_to_le32(size_map_info));
+#endif
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -763,14 +810,17 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
- if (!megasas_issue_polled(instance, cmd))
- ret = 0;
- else {
- printk(KERN_ERR "megasas: Get LD Map Info Failed\n");
- ret = -1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
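[Illustrative note, not part of the patch: the kernel-doc added above spells out the mailbox convention for MR_DCMD_LD_MAP_GET_INFO. A hedged sketch of how a caller would fill those bytes for the blocking "pend till config change" form; num_lds is a caller-supplied value, and whether b[2] is driver-set or firmware-reported is not shown in this hunk, so it is left untouched here:]

	/* Sketch: mailbox bytes per the convention documented above. */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->mbox.b[0] = num_lds;	/* number of LDs being sync'd */
	dcmd->mbox.b[1] = 1;		/* 1 == pend till config change */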
@@ -807,7 +857,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
u32 size_sync_info, num_lds;
struct fusion_context *fusion;
struct MR_LD_TARGET_SYNC *ci = NULL;
- struct MR_FW_RAID_MAP_ALL *map;
+ struct MR_DRV_RAID_MAP_ALL *map;
struct MR_LD_RAID *raid;
struct MR_LD_TARGET_SYNC *ld_sync;
dma_addr_t ci_h = 0;
@@ -828,7 +878,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
return 1;
}
- map = fusion->ld_map[instance->map_id & 1];
+ map = fusion->ld_drv_map[instance->map_id & 1];
num_lds = le32_to_cpu(map->raidMap.ldCount);
@@ -840,7 +890,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
ci = (struct MR_LD_TARGET_SYNC *)
fusion->ld_map[(instance->map_id - 1) & 1];
- memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL));
+ memset(ci, 0, fusion->max_map_sz);
ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
@@ -852,8 +902,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
ld_sync->seqNum = raid->seqNum;
}
- size_map_info = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+ size_map_info = fusion->current_map_sz;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
@@ -971,7 +1020,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
max_cmd = instance->max_fw_cmds;
- fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16;
+ fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
fusion->request_alloc_sz =
sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
@@ -988,8 +1037,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
fusion->max_sge_in_chain =
MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
- instance->max_num_sge = fusion->max_sge_in_main_msg +
- fusion->max_sge_in_chain - 2;
+ instance->max_num_sge = rounddown_pow_of_two(
+ fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2);
/* Used for pass thru MFI frame (DCMD) */
fusion->chain_offset_mfi_pthru =
@@ -1016,17 +1065,75 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
goto fail_ioc_init;
megasas_display_intel_branding(instance);
+ if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
+ dev_err(&instance->pdev->dev,
+ "Could not get controller info. Fail from %s %d\n",
+ __func__, __LINE__);
+ goto fail_ioc_init;
+ }
+
+ instance->supportmax256vd =
+ instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+ /* Below is additional check to address future FW enhancement */
+ if (instance->ctrl_info->max_lds > 64)
+ instance->supportmax256vd = 1;
+ instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ if (instance->supportmax256vd) {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ } else {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ }
+ dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n"
+ "Driver supports %d VDs %d PDs\n",
+ instance->fw_supported_vd_count,
+ instance->fw_supported_pd_count,
+ instance->drv_supported_vd_count,
+ instance->drv_supported_pd_count);
instance->flag_ieee = 1;
+ fusion->fast_path_io = 0;
- fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+ fusion->old_map_sz =
+ sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->fw_supported_vd_count - 1));
+ fusion->new_map_sz =
+ sizeof(struct MR_FW_RAID_MAP_EXT);
+ fusion->drv_map_sz =
+ sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->drv_supported_vd_count - 1));
+
+ fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+ for (i = 0; i < 2; i++) {
+ fusion->ld_map[i] = NULL;
+ fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
+ fusion->drv_map_pages);
+ if (!fusion->ld_drv_map[i]) {
+ dev_err(&instance->pdev->dev, "Could not allocate "
+ "memory for local map info for %d pages\n",
+ fusion->drv_map_pages);
+ if (i == 1)
+ free_pages((ulong)fusion->ld_drv_map[0],
+ fusion->drv_map_pages);
+ goto fail_ioc_init;
+ }
+ }
+
+ fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz);
+
+ if (instance->supportmax256vd)
+ fusion->current_map_sz = fusion->new_map_sz;
+ else
+ fusion->current_map_sz = fusion->old_map_sz;
- fusion->fast_path_io = 0;
for (i = 0; i < 2; i++) {
fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
- fusion->map_sz,
+ fusion->max_map_sz,
&fusion->ld_map_phys[i],
GFP_KERNEL);
if (!fusion->ld_map[i]) {
@@ -1043,7 +1150,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
fail_map_info:
if (i == 1)
- dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
+ dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
megasas_free_cmds_fusion(instance);
@@ -1065,6 +1172,11 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
u32 req_desc_hi,
struct megasas_register_set __iomem *regs)
{
+#if defined(writeq) && defined(CONFIG_64BIT)
+ u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo);
+
+ writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port);
+#else
unsigned long flags;
spin_lock_irqsave(&instance->hba_lock, flags);
@@ -1072,6 +1184,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
+#endif
}
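[Illustrative note, not part of the patch: the #if above is about posting the 64-bit request descriptor. On 64-bit builds that provide writeq(), both halves reach the inbound queue port in one MMIO write; the fallback must serialise its two 32-bit writes with hba_lock so another CPU cannot interleave its own pair. A standalone sketch of the same pattern, endianness conversions elided, register and lock names as used in the code above:]

	/* Sketch of the descriptor-posting pattern. */
	static void fire_descriptor_sketch(struct megasas_instance *instance,
					   u32 lo, u32 hi)
	{
	#if defined(writeq) && defined(CONFIG_64BIT)
		/* single 64-bit MMIO write, no lock needed */
		writeq(((u64)hi << 32) | lo,
		       &instance->reg_set->inbound_low_queue_port);
	#else
		unsigned long flags;

		/* the two 32-bit writes must not interleave with another CPU's pair */
		spin_lock_irqsave(&instance->hba_lock, flags);
		writel(lo, &instance->reg_set->inbound_low_queue_port);
		writel(hi, &instance->reg_set->inbound_high_queue_port);
		spin_unlock_irqrestore(&instance->hba_lock, flags);
	#endif
	}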
/**
@@ -1224,7 +1337,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
- struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
{
struct MR_LD_RAID *raid;
u32 ld;
@@ -1409,7 +1522,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct IO_REQUEST_INFO io_info;
struct fusion_context *fusion;
- struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
u8 *raidLUN;
device_id = MEGASAS_DEV_INDEX(instance, scp);
@@ -1486,10 +1599,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
io_info.isRead = 1;
- local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
- MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) {
+ instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
io_request->RaidContext.regLockFlags = 0;
fp_possible = 0;
} else {
@@ -1529,10 +1642,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
(io_info.isRead)) {
io_info.devHandle =
- get_updated_dev_handle(
+ get_updated_dev_handle(instance,
&fusion->load_balance_info[device_id],
&io_info);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ cmd->pd_r1_lb = io_info.pd_after_lb;
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
@@ -1579,7 +1693,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
u16 pd_index = 0;
- struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
struct fusion_context *fusion = instance->ctrl_context;
u8 span, physArm;
u16 devHandle;
@@ -1591,7 +1705,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(instance, scmd);
pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+scmd->device->id;
- local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
@@ -1639,7 +1753,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
goto NonFastPath;
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+ if ((ld >= instance->fw_supported_vd_count) ||
+ (!fusion->fast_path_io))
goto NonFastPath;
raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -1864,10 +1979,11 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
struct megasas_cmd *cmd_mfi;
struct megasas_cmd_fusion *cmd_fusion;
u16 smid, num_completed;
- u8 reply_descript_type, arm;
+ u8 reply_descript_type;
u32 status, extStatus, device_id;
union desc_value d_val;
struct LD_LOAD_BALANCE_INFO *lbinfo;
+ int threshold_reply_count = 0;
fusion = instance->ctrl_context;
@@ -1914,10 +2030,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
lbinfo = &fusion->load_balance_info[device_id];
if (cmd_fusion->scmd->SCp.Status &
MEGASAS_LOAD_BALANCE_FLAG) {
- arm = lbinfo->raid1DevHandle[0] ==
- cmd_fusion->io_request->DevHandle ? 0 :
- 1;
- atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+ atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
cmd_fusion->scmd->SCp.Status &=
~MEGASAS_LOAD_BALANCE_FLAG;
}
@@ -1941,10 +2054,19 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
break;
case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+ if (!cmd_mfi->mpt_pthr_cmd_blocked) {
+ if (megasas_dbg_lvl == 5)
+ dev_info(&instance->pdev->dev,
+ "freeing mfi/mpt pass-through "
+ "from %s %d\n",
+ __func__, __LINE__);
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
+ cmd_fusion);
+ }
+
megasas_complete_cmd(instance, cmd_mfi, DID_OK);
cmd_fusion->flags = 0;
- megasas_return_cmd_fusion(instance, cmd_fusion);
-
break;
}
@@ -1955,6 +2077,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
desc->Words = ULLONG_MAX;
num_completed++;
+ threshold_reply_count++;
/* Get the next reply descriptor */
if (!fusion->last_reply_idx[MSIxIndex])
@@ -1974,6 +2097,25 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
break;
+ /*
+ * Write to reply post host index register after completing threshold
+ * number of reply counts and still there are more replies in reply queue
+ * pending to be completed
+ */
+ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY))
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ threshold_reply_count = 0;
+ }
}
if (!num_completed)
@@ -2028,7 +2170,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
struct megasas_irq_context *irq_context = devp;
struct megasas_instance *instance = irq_context->instance;
- u32 mfiStatus, fw_state;
+ u32 mfiStatus, fw_state, dma_state;
if (instance->mask_interrupts)
return IRQ_NONE;
@@ -2050,7 +2192,16 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* If we didn't complete any commands, check for FW fault */
fw_state = instance->instancet->read_fw_status_reg(
instance->reg_set) & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT) {
+ dma_state = instance->instancet->read_fw_status_reg
+ (instance->reg_set) & MFI_STATE_DMADONE;
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support) {
+ /* Start collecting crash, if DMA bit is done */
+ if ((fw_state == MFI_STATE_FAULT) && dma_state)
+ schedule_work(&instance->crash_init);
+ else if (fw_state == MFI_STATE_FAULT)
+ schedule_work(&instance->work_init);
+ } else if (fw_state == MFI_STATE_FAULT) {
printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
"for scsi%d\n", instance->host->host_no);
schedule_work(&instance->work_init);
@@ -2075,6 +2226,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd;
struct fusion_context *fusion;
struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+ u32 opcode;
cmd = megasas_get_cmd_fusion(instance);
if (!cmd)
@@ -2082,9 +2234,20 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
/* Save the smid. To be used for returning the cmd */
mfi_cmd->context.smid = cmd->index;
-
cmd->sync_cmd_idx = mfi_cmd->index;
+ /* Set this only for Blocked commands */
+ opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
+ mfi_cmd->is_wait_event = 1;
+
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ mfi_cmd->is_wait_event = 1;
+
+ if (mfi_cmd->is_wait_event)
+ mfi_cmd->mpt_pthr_cmd_blocked = cmd;
+
/*
* For cmds where the flag is set, store the flag and check
* on completion. For cmds with this flag, don't call
@@ -2173,6 +2336,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
return;
}
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
instance->instancet->fire_cmd(instance, req_desc->u.low,
req_desc->u.high, instance->reg_set);
}
@@ -2203,6 +2367,49 @@ megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
}
/**
+ * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
+ * @instance: Controller's soft instance
+ * return: none; the number of buffers actually allocated is stored in instance->drv_buf_alloc
+ */
+static void
+megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
+{
+ unsigned int i;
+
+ instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
+ for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
+ instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
+ instance->crash_buf_pages);
+ if (!instance->crash_buf[i]) {
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "memory allocation failed at index %d\n", i);
+ break;
+ }
+ }
+ instance->drv_buf_alloc = i;
+}
+
+/**
+ * megasas_free_host_crash_buffer -	Free the host buffers allocated for Firmware crash dump collection
+ * @instance: Controller's soft instance
+ */
+void
+megasas_free_host_crash_buffer(struct megasas_instance *instance)
+{
+	unsigned int i;
+ for (i = 0; i < instance->drv_buf_alloc; i++) {
+ if (instance->crash_buf[i])
+ free_pages((ulong)instance->crash_buf[i],
+ instance->crash_buf_pages);
+ }
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ instance->fw_crash_buffer_size = 0;
+}
+
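[Illustrative note, not part of the patch: these two helpers rely on the standard page-allocator pairing - get_order() converts a byte size into an allocation order, __get_free_pages() hands back 2^order contiguous pages, and free_pages() must be passed the same order. A minimal sketch of the idiom:]

	/* Sketch: remember the order so the matching free_pages() call
	 * releases exactly the pages that were allocated.
	 */
	unsigned int order = get_order(CRASH_DMA_BUF_SIZE);
	unsigned long buf = __get_free_pages(GFP_KERNEL, order);

	if (buf) {
		/* ... use the buffer ... */
		free_pages(buf, order);
	}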
+/**
* megasas_adp_reset_fusion - For controller reset
* @regs: MFI register set
*/
@@ -2345,6 +2552,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u32 host_diag, abs_state, status_reg, reset_adapter;
+ u32 io_timeout_in_crash_mode = 0;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -2355,8 +2563,45 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
"returning FAILED for scsi%d.\n",
instance->host->host_no);
+ mutex_unlock(&instance->reset_mutex);
return FAILED;
}
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+
+ /* IO timeout detected, forcibly put FW in FAULT state */
+ if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
+ instance->crash_dump_app_support && iotimeout) {
+ dev_info(&instance->pdev->dev, "IO timeout is detected, "
+ "forcibly FAULT Firmware\n");
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ status_reg = readl(&instance->reg_set->doorbell);
+ writel(status_reg | MFI_STATE_FORCE_OCR,
+ &instance->reg_set->doorbell);
+ readl(&instance->reg_set->doorbell);
+ mutex_unlock(&instance->reset_mutex);
+ do {
+ ssleep(3);
+ io_timeout_in_crash_mode++;
+ dev_dbg(&instance->pdev->dev, "waiting for [%d] "
+ "seconds for crash dump collection and OCR "
+ "to be done\n", (io_timeout_in_crash_mode * 3));
+ } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ (io_timeout_in_crash_mode < 80));
+
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "OCR done for IO "
+ "timeout case\n");
+ retval = SUCCESS;
+ } else {
+ dev_info(&instance->pdev->dev, "Controller is not "
+ "operational after 240 seconds wait for IO "
+ "timeout case in FW crash dump mode, do "
+ "OCR/kill adapter\n");
+ retval = megasas_reset_fusion(shost, 0);
+ }
+ return retval;
+ }
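/*
 * Illustrative note, not part of the patch hunks above: the wait loop
 * above polls every 3 seconds for at most 80 iterations, i.e. the 240
 * seconds quoted in the log message, giving firmware time to complete
 * crash dump collection and the subsequent OCR. If the controller is
 * still not operational, megasas_reset_fusion(shost, 0) is re-entered
 * with iotimeout cleared, so this forced-FAULT branch is skipped and
 * the normal OCR / kill-adapter path runs instead.
 */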
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
@@ -2563,10 +2808,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->frame->dcmd.opcode ==
cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
- megasas_return_cmd(instance,
- cmd_mfi);
- megasas_return_cmd_fusion(
- instance, cmd_fusion);
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
} else {
req_desc =
megasas_get_request_descriptor(
@@ -2603,7 +2845,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
/* Reset load balance info */
memset(fusion->load_balance_info, 0,
sizeof(struct LD_LOAD_BALANCE_INFO)
- *MAX_LOGICAL_DRIVES);
+ *MAX_LOGICAL_DRIVES_EXT);
if (!megasas_get_map_info(instance))
megasas_sync_map_info(instance);
@@ -2623,6 +2865,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
printk(KERN_WARNING "megaraid_sas: Reset "
"successful for scsi%d.\n",
instance->host->host_no);
+
+ if (instance->crash_dump_drv_support) {
+ if (instance->crash_dump_app_support)
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_ON);
+ else
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+ }
retval = SUCCESS;
goto out;
}
@@ -2651,6 +2902,74 @@ out:
return retval;
}
+/* Fusion Crash dump collection work queue */
+void megasas_fusion_crash_dump_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, crash_init);
+ u32 status_reg;
+ u8 partial_copy = 0;
+
+
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+
+ /*
+ * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
+ * to host crash buffers
+ */
+ if (instance->drv_buf_index == 0) {
+ /* Buffer is already allocated for old Crash dump.
+ * Do OCR and do not wait for crash dump collection
+ */
+ if (instance->drv_buf_alloc) {
+ dev_info(&instance->pdev->dev, "earlier crash dump is "
+ "not yet copied by application, ignoring this "
+ "crash dump and initiating OCR\n");
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ writel(status_reg,
+ &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ return;
+ }
+ megasas_alloc_host_crash_buffer(instance);
+ dev_info(&instance->pdev->dev, "Number of host crash buffers "
+ "allocated: %d\n", instance->drv_buf_alloc);
+ }
+
+ /*
+ * If the driver has already filled all of the buffers it was able to
+ * allocate and FW still has more crash dump data, the remaining data
+ * is ignored.
+ */
+ if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
+ dev_info(&instance->pdev->dev, "Driver is done copying "
+ "the buffer: %d\n", instance->drv_buf_alloc);
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ partial_copy = 1;
+ } else {
+ memcpy(instance->crash_buf[instance->drv_buf_index],
+ instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+ instance->drv_buf_index++;
+ status_reg &= ~MFI_STATE_DMADONE;
+ }
+
+ if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
+ dev_info(&instance->pdev->dev, "Crash Dump is available, number "
+ "of copied buffers: %d\n", instance->drv_buf_index);
+ instance->fw_crash_buffer_size = instance->drv_buf_index;
+ instance->fw_crash_state = AVAILABLE;
+ instance->drv_buf_index = 0;
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ if (!partial_copy)
+ megasas_reset_fusion(instance->host, 0);
+ } else {
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ }
+}
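/*
 * Illustrative summary, not part of the patch hunks above: each run of
 * this work item copies at most one CRASH_DMA_BUF_SIZE chunk out of the
 * firmware DMA buffer into crash_buf[drv_buf_index] and acknowledges it
 * by clearing MFI_STATE_DMADONE in the scratch pad, which lets firmware
 * DMA the next chunk and raise the doorbell again. When firmware sets
 * MFI_STATE_CRASH_DUMP_DONE, or the driver runs out of host buffers and
 * sets it on firmware's behalf, fw_crash_state becomes AVAILABLE so an
 * application can read the collected dump, and a reset is issued unless
 * only a partial copy was made.
 */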
+
+
/* Fusion OCR work queue */
void megasas_fusion_ocr_wq(struct work_struct *work)
{
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index e76af5459a09..0d183d521bdd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -86,6 +86,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
+#define THRESHOLD_REPLY_COUNT 50
/*
* Raid Context structure which describes MegaRAID specific IO Parameters
@@ -478,10 +479,13 @@ struct MPI2_IOC_INIT_REQUEST {
#define MAX_ROW_SIZE 32
#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
#define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
#define MAX_ARRAYS 128
#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_ARRAYS_EXT 256
+#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
@@ -601,7 +605,6 @@ struct MR_FW_RAID_MAP {
u32 maxArrays;
} validationInfo;
u32 version[5];
- u32 reserved1[5];
};
u32 ldCount;
@@ -627,6 +630,8 @@ struct IO_REQUEST_INFO {
u8 start_span;
u8 reserved;
u64 start_row;
+ u8 span_arm; /* span[7:5], arm[4:0] */
+ u8 pd_after_lb;
};
struct MR_LD_TARGET_SYNC {
@@ -678,14 +683,14 @@ struct megasas_cmd_fusion {
u32 sync_cmd_idx;
u32 index;
u8 flags;
+ u8 pd_r1_lb;
};
struct LD_LOAD_BALANCE_INFO {
u8 loadBalanceFlag;
u8 reserved1;
- u16 raid1DevHandle[2];
- atomic_t scsi_pending_cmds[2];
- u64 last_accessed_block[2];
+ atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
+ u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
};
/* SPAN_SET is info calculated from span info from Raid map per LD */
@@ -713,11 +718,86 @@ struct MR_FW_RAID_MAP_ALL {
struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
} __attribute__ ((packed));
+struct MR_DRV_RAID_MAP {
+ /* total size of this structure, including this field.
+ * This field is adjusted by the driver for the extended raid map;
+ * otherwise the value is taken from the firmware raid map.
+ */
+ u32 totalSize;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ /* timeout value used by driver in FP IOs*/
+ /* timeout value used by driver in FP IOs */
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[1];
+
+};
+
+/* The driver raid map size is the same as the extended raid map.
+ * MR_DRV_RAID_MAP_ALL mirrors the old raid map layout and exists
+ * mainly for code re-use.
+ */
+struct MR_DRV_RAID_MAP_ALL {
+
+ struct MR_DRV_RAID_MAP raidMap;
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+} __packed;
+
+
+
+struct MR_FW_RAID_MAP_EXT {
+ /* Not used in the new map */
+ u32 reserved;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+};
+
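/*
 * Illustrative sketch, not part of the patch hunks above: how the map
 * sizes carried in struct fusion_context below (new_map_sz, drv_map_sz
 * and friends) relate to these layouts. The exact assignments live in
 * the megaraid_sas .c changes, so treat this only as a reading aid:
 *
 *	new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
 *	drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
 *		     sizeof(struct MR_LD_SPAN_MAP) *
 *		     (MAX_LOGICAL_DRIVES_EXT - 1);
 *
 * i.e. drv_map_sz is simply sizeof(struct MR_DRV_RAID_MAP_ALL), the
 * driver-local, non-DMA copy referenced by ld_drv_map[] below.
 */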
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
struct list_head cmd_pool;
- spinlock_t cmd_pool_lock;
+ spinlock_t mpt_pool_lock;
dma_addr_t req_frames_desc_phys;
u8 *req_frames_desc;
@@ -749,10 +829,18 @@ struct fusion_context {
struct MR_FW_RAID_MAP_ALL *ld_map[2];
dma_addr_t ld_map_phys[2];
- u32 map_sz;
+ /* Non DMA-able memory. Driver local copy. */
+ struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
+
+ u32 max_map_sz;
+ u32 current_map_sz;
+ u32 old_map_sz;
+ u32 new_map_sz;
+ u32 drv_map_sz;
+ u32 drv_map_pages;
u8 fast_path_io;
- struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
- LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+ struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
};
union desc_value {
@@ -763,4 +851,5 @@ union desc_value {
} u;
};
+
#endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 39f08dd20556..657b45ca04c5 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
# Kernel configuration file for the MPT2SAS
#
# This code is based on drivers/scsi/mpt2sas/Kconfig
-# Copyright (C) 2007-2012 LSI Corporation
+# Copyright (C) 2007-2014 LSI Corporation
# (mailto:DL-MPTFusionLinux@lsi.com)
# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 7b14a015c903..088eefa67da8 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.28
+ * mpi2.h Version: 02.00.32
*
* Version History
* ---------------
@@ -78,6 +78,11 @@
* 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
* 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
* 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -103,7 +108,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x1C)
+#define MPI2_HEADER_VERSION_UNIT (0x20)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -263,6 +268,7 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
+#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */
/*
* Defines for the HCBSize and address
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 88cb7f828bbd..510ef0dc8d7b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.23
+ * mpi2_cnfg.h Version: 02.00.26
*
* Version History
* ---------------
@@ -150,7 +150,13 @@
* Added UEFIVersion field to BIOS Page 1 and defined new
* BiosOptions bits.
* 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
- * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ * obsolete for MPI v2.5 and later.
+ * Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
* --------------------------------------------------------------------------
*/
@@ -773,6 +779,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
/* IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
@@ -844,7 +851,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
/* defines for IO Unit Page 5 DmaEngineCapabilities field */
-#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000)
#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
@@ -885,13 +892,17 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
U16 IOCTemperature; /* 0x10 */
U8 IOCTemperatureUnits; /* 0x12 */
U8 IOCSpeed; /* 0x13 */
- U16 BoardTemperature; /* 0x14 */
- U8 BoardTemperatureUnits; /* 0x16 */
- U8 Reserved3; /* 0x17 */
+ U16 BoardTemperature; /* 0x14 */
+ U8 BoardTemperatureUnits; /* 0x16 */
+ U8 Reserved3; /* 0x17 */
+ U32 Reserved4; /* 0x18 */
+ U32 Reserved5; /* 0x1C */
+ U32 Reserved6; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
-#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04)
/* defines for IO Unit Page 7 PCIeWidth field */
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
@@ -1801,6 +1812,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
@@ -1813,6 +1825,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 9d284dae6553..eea1a16b13ec 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_init.h
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.14
+ * mpi2_init.h Version: 02.00.15
*
* Version History
* ---------------
@@ -37,6 +37,8 @@
* 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
* Priority to match SAM-4.
* 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
* --------------------------------------------------------------------------
*/
@@ -234,7 +236,7 @@ typedef struct _MPI2_SCSI_IO_REPLY
U32 SenseCount; /* 0x18 */
U32 ResponseInfo; /* 0x1C */
U16 TaskTag; /* 0x20 */
- U16 Reserved4; /* 0x22 */
+ U16 SCSIStatusQualifier; /* 0x22 */
U32 BidirectionalTransferCount; /* 0x24 */
U32 Reserved5; /* 0x28 */
U32 Reserved6; /* 0x2C */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index d159c5f24aab..2c3b0f28576b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.22
+ * mpi2_ioc.h Version: 02.00.23
*
* Version History
* ---------------
@@ -121,6 +121,11 @@
* 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
* Added ElapsedSeconds field to
* MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
* --------------------------------------------------------------------------
*/
@@ -177,6 +182,9 @@ typedef struct _MPI2_IOC_INIT_REQUEST
#define MPI2_WHOINIT_HOST_DRIVER (0x04)
#define MPI2_WHOINIT_MANUFACTURER (0x05)
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
+
/* MsgVersion */
#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
@@ -189,9 +197,17 @@ typedef struct _MPI2_IOC_INIT_REQUEST
#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
-/* minimum depth for the Reply Descriptor Post Queue */
+/* minimum depth for a Reply Descriptor Post Queue */
#define MPI2_RDPQ_DEPTH_MIN (16)
+/* Reply Descriptor Post Queue Array Entry */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+ U64 RDPQBaseAddress; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry;
/* IOCInit Reply message */
typedef struct _MPI2_IOC_INIT_REPLY
@@ -307,6 +323,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
/* ProductID field uses MPI2_FW_HEADER_PID_ */
/* IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
@@ -1153,6 +1170,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST
#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
/* FWDownload TransactionContext Element */
@@ -1379,14 +1397,15 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
/* defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
#define MPI2_EXT_IMAGE_TYPE_MAX \
@@ -1555,6 +1574,39 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER
#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+ U8 HashImageType; /* 0x00 */
+ U8 HashAlgorithm; /* 0x01 */
+ U8 EncryptionAlgorithm; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 EncryptedHash[1]; /* 0x08 */
+} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+ U8 ImageVersion; /* 0x00 */
+ U8 NumHash; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 Reserved2; /* 0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t;
+
/****************************************************************************
* PowerManagementControl message
****************************************************************************/
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 0d202a2c6db7..7efa58ff0d34 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_raid.h
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.09
+ * mpi2_raid.h Version: 02.00.10
*
* Version History
* ---------------
@@ -29,6 +29,7 @@
* 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
* 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
* Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
* --------------------------------------------------------------------------
*/
@@ -45,6 +46,9 @@
* RAID Action messages
****************************************************************************/
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000)
+
/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 50b39ccd526a..45b6fa10b803 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_sas.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 11b2ac4e7c6e..9be03ed46180 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_tool.h
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.10
+ * mpi2_tool.h Version: 02.00.11
*
* Version History
* ---------------
@@ -29,6 +29,7 @@
* MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
* 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
* it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
* --------------------------------------------------------------------------
*/
@@ -48,6 +49,7 @@
#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
/****************************************************************************
@@ -321,6 +323,44 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t;
+/****************************************************************************
+* Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Console; /* 0x0C */
+ U8 Flags; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U8 TextToDisplay[4]; /* 0x10 */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+Mpi2ToolboxTextDisplayRequest_t,
+MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
+
+
+
/*****************************************************************************
*
* Diagnostic Buffer Messages
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
index 0b128b68a5ea..6b0dcdd02f68 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 2f262be890c5..58e45216d1ec 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -80,6 +80,10 @@ static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+static int max_msix_vectors = -1;
+module_param(max_msix_vectors, int, 0);
+MODULE_PARM_DESC(max_msix_vectors, " max msix vectors ");
+
static int mpt2sas_fwfault_debug;
MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
"and halt firmware - (default=0)");
@@ -88,6 +92,12 @@ static int disable_discovery = -1;
module_param(disable_discovery, int, 0);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+static int
+_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
+
+static int
+_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
+
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
*
@@ -1175,17 +1185,22 @@ static int
_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
- char *desc = NULL;
+ u64 consistent_dma_mask;
+
+ if (ioc->dma_mask)
+ consistent_dma_mask = DMA_BIT_MASK(64);
+ else
+ consistent_dma_mask = DMA_BIT_MASK(32);
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask =
dma_get_required_mask(&pdev->dev);
- if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
- DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(64))) {
+ if ((required_mask > DMA_BIT_MASK(32)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- desc = "64";
+ ioc->dma_mask = 64;
goto out;
}
}
@@ -1194,18 +1209,29 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
ioc->base_add_sg_single = &_base_add_sg_single_32;
ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- desc = "32";
+ ioc->dma_mask = 32;
} else
return -ENODEV;
out:
si_meminfo(&s);
- printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
- "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
+ printk(MPT2SAS_INFO_FMT
+ "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
+static int
+_base_change_consistent_dma_mask(struct MPT2SAS_ADAPTER *ioc,
+ struct pci_dev *pdev)
+{
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+ }
+ return 0;
+}
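/*
 * Illustrative note, not part of the patch hunks above: together with
 * the _base_config_dma_addressing() change, this helper gives a
 * two-step DMA mask setup. On the first pass ioc->dma_mask is 0, so the
 * coherent mask stays at 32 bits and the reply descriptor post queues
 * allocated later land below 4 GB (which appears to be the point for
 * RDPQ array mode); once those pools exist, the coherent mask is raised
 * back to 64 bits by the call made after the reply post allocation loop
 * in _base_allocate_memory_pools().
 */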
/**
* _base_check_enable_msix - checks MSI-X capability.
* @ioc: per adapter object
@@ -1402,6 +1428,20 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
+ if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
+ max_msix_vectors = 8;
+
+ if (max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ ioc->reply_queue_count);
+ ioc->msix_vector_count = ioc->reply_queue_count;
+ } else if (max_msix_vectors == 0)
+ goto try_ioapic;
+
+ printk(MPT2SAS_INFO_FMT
+ "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
+ ioc->name, ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
+
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
GFP_KERNEL);
if (!entries) {
@@ -1414,10 +1454,10 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
a->entry = i;
- r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+ r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
if (r) {
- dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
- "failed (r=%d) !!!\n", ioc->name, r));
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "pci_enable_msix_exact failed (r=%d) !!!\n", ioc->name, r));
kfree(entries);
goto try_ioapic;
}
@@ -1439,6 +1479,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
/* failback to io_apic interrupt routing */
try_ioapic:
+ ioc->reply_queue_count = 1;
r = _base_request_irq(ioc, 0, ioc->pdev->irq);
return r;
@@ -1520,6 +1561,16 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
}
_base_mask_interrupts(ioc);
+
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_fail;
+
+ if (!ioc->rdpq_array_enable_assigned) {
+ ioc->rdpq_array_enable = ioc->rdpq_array_capable;
+ ioc->rdpq_array_enable_assigned = 1;
+ }
+
r = _base_enable_msix(ioc);
if (r)
goto out_fail;
@@ -2317,7 +2368,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
static void
_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
{
- int i;
+ int i = 0;
+ struct reply_post_struct *rps;
dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -2358,15 +2410,25 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
ioc->reply_free = NULL;
}
- if (ioc->reply_post_free) {
- pci_pool_free(ioc->reply_post_free_dma_pool,
- ioc->reply_post_free, ioc->reply_post_free_dma);
+ if (ioc->reply_post) {
+ do {
+ rps = &ioc->reply_post[i];
+ if (rps->reply_post_free) {
+ pci_pool_free(
+ ioc->reply_post_free_dma_pool,
+ rps->reply_post_free,
+ rps->reply_post_free_dma);
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->name, rps->reply_post_free));
+ rps->reply_post_free = NULL;
+ }
+ } while (ioc->rdpq_array_enable &&
+ (++i < ioc->reply_queue_count));
+
if (ioc->reply_post_free_dma_pool)
pci_pool_destroy(ioc->reply_post_free_dma_pool);
- dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
- "reply_post_free_pool(0x%p): free\n", ioc->name,
- ioc->reply_post_free));
- ioc->reply_post_free = NULL;
+ kfree(ioc->reply_post);
}
if (ioc->config_page) {
@@ -2509,6 +2571,65 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
ioc->chains_needed_per_io));
+ /* reply post queue, 16 byte align */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+
+ sz = reply_post_free_sz;
+ if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+ sz *= ioc->reply_queue_count;
+
+ ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
+ (ioc->reply_queue_count):1,
+ sizeof(struct reply_post_struct), GFP_KERNEL);
+
+ if (!ioc->reply_post) {
+ printk(MPT2SAS_ERR_FMT "reply_post_free pool: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool) {
+ printk(MPT2SAS_ERR_FMT
+ "reply_post_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ i = 0;
+ do {
+ ioc->reply_post[i].reply_post_free =
+ pci_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free) {
+ printk(MPT2SAS_ERR_FMT
+ "reply_post_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_post[i].reply_post_free, 0, sz);
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "reply post free pool (0x%p): depth(%d), "
+ "element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post_queue_depth, 8, sz/1024));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "reply_post_free_dma = (0x%llx)\n", ioc->name,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ total_sz += sz;
+ } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
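/*
 * Illustrative note, not part of the patch hunks above: the do/while
 * above covers both layouts with one code path. Without RDPQ array mode
 * there is a single reply_post[0] element whose pool allocation holds
 * every reply queue back to back (sz was multiplied by
 * reply_queue_count); with RDPQ array mode each reply queue gets its
 * own reply_post[i] element sized for one queue, and the per-queue DMA
 * addresses are later handed to firmware through the
 * Mpi2IOCInitRDPQArrayEntry array built in _base_send_ioc_init().
 */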
+ if (ioc->dma_mask == 64) {
+ if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+ printk(MPT2SAS_WARN_FMT
+ "no suitable consistent DMA mask for %s\n",
+ ioc->name, pci_name(ioc->pdev));
+ goto out;
+ }
+ }
+
ioc->scsiio_depth = ioc->hba_queue_depth -
ioc->hi_priority_depth - ioc->internal_depth;
@@ -2720,37 +2841,6 @@ chain_done:
"(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
total_sz += sz;
- /* reply post queue, 16 byte align */
- reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(Mpi2DefaultReplyDescriptor_t);
- if (_base_is_controller_msix_enabled(ioc))
- sz = reply_post_free_sz * ioc->reply_queue_count;
- else
- sz = reply_post_free_sz;
- ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
- ioc->pdev, sz, 16, 0);
- if (!ioc->reply_post_free_dma_pool) {
- printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
- "failed\n", ioc->name);
- goto out;
- }
- ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
- GFP_KERNEL, &ioc->reply_post_free_dma);
- if (!ioc->reply_post_free) {
- printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
- "failed\n", ioc->name);
- goto out;
- }
- memset(ioc->reply_post_free, 0, sz);
- dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
- "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
- sz/1024));
- dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
- "(0x%llx)\n", ioc->name, (unsigned long long)
- ioc->reply_post_free_dma));
- total_sz += sz;
-
ioc->config_page_sz = 512;
ioc->config_page = pci_alloc_consistent(ioc->pdev,
ioc->config_page_sz, &ioc->config_page_dma);
@@ -3374,6 +3464,64 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
}
/**
+ * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_for_iocstate(struct MPT2SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state, doorbell;
+ int rc;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery)
+ return 0;
+
+ doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+ ioc_state = doorbell & MPI2_IOC_STATE_MASK;
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ switch (ioc_state) {
+ case MPI2_IOC_STATE_READY:
+ case MPI2_IOC_STATE_OPERATIONAL:
+ return 0;
+ }
+
+ if (doorbell & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "unexpected doorbell active!\n", ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if (ioc_state == MPI2_IOC_STATE_FAULT) {
+ mpt2sas_base_fault_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ return -EFAULT;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, sleep_flag);
+ return rc;
+}
+
+/**
* _base_get_ioc_facts - obtain ioc facts reply and save in ioc
* @ioc: per adapter object
* @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3391,6 +3539,13 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
+ r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+ if (r) {
+ printk(MPT2SAS_ERR_FMT "%s: failed getting to correct state\n",
+ ioc->name, __func__);
+ return r;
+ }
+
mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
memset(&mpi_request, 0, mpi_request_sz);
@@ -3422,6 +3577,9 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
ioc->ir_firmware = 1;
+ if ((facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+ ioc->rdpq_array_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -3457,9 +3615,12 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
Mpi2IOCInitRequest_t mpi_request;
Mpi2IOCInitReply_t mpi_reply;
- int r;
+ int i, r = 0;
struct timeval current_time;
u16 ioc_status;
+ u32 reply_post_free_array_sz = 0;
+ Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
+ dma_addr_t reply_post_free_array_dma;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -3488,9 +3649,31 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
cpu_to_le64((u64)ioc->request_dma);
mpi_request.ReplyFreeQueueAddress =
cpu_to_le64((u64)ioc->reply_free_dma);
- mpi_request.ReplyDescriptorPostQueueAddress =
- cpu_to_le64((u64)ioc->reply_post_free_dma);
+ if (ioc->rdpq_array_enable) {
+ reply_post_free_array_sz = ioc->reply_queue_count *
+ sizeof(Mpi2IOCInitRDPQArrayEntry);
+ reply_post_free_array = pci_alloc_consistent(ioc->pdev,
+ reply_post_free_array_sz, &reply_post_free_array_dma);
+ if (!reply_post_free_array) {
+ printk(MPT2SAS_ERR_FMT
+ "reply_post_free_array: pci_alloc_consistent failed\n",
+ ioc->name);
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(reply_post_free_array, 0, reply_post_free_array_sz);
+ for (i = 0; i < ioc->reply_queue_count; i++)
+ reply_post_free_array[i].RDPQBaseAddress =
+ cpu_to_le64(
+ (u64)ioc->reply_post[i].reply_post_free_dma);
+ mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)reply_post_free_array_dma);
+ } else {
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
+ }
/* This time stamp specifies number of milliseconds
* since epoch ~ midnight January 1, 1970.
@@ -3518,7 +3701,7 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
if (r != 0) {
printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
ioc->name, __func__, r);
- return r;
+ goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -3528,7 +3711,12 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
r = -EIO;
}
- return 0;
+out:
+ if (reply_post_free_array)
+ pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+ reply_post_free_array,
+ reply_post_free_array_dma);
+ return r;
}
/**
@@ -4061,7 +4249,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
u8 hide_flag;
struct adapter_reply_queue *reply_q;
long reply_post_free;
- u32 reply_post_free_sz;
+ u32 reply_post_free_sz, index = 0;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
@@ -4132,19 +4320,27 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
_base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
- reply_post_free = (long)ioc->reply_post_free;
reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t);
+ reply_post_free = (long)ioc->reply_post[index].reply_post_free;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
reply_q->reply_post_host_index = 0;
reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
reply_post_free;
for (i = 0; i < ioc->reply_post_queue_depth; i++)
reply_q->reply_post_free[i].Words =
- cpu_to_le64(ULLONG_MAX);
+ cpu_to_le64(ULLONG_MAX);
if (!_base_is_controller_msix_enabled(ioc))
goto skip_init_reply_post_free_queue;
- reply_post_free += reply_post_free_sz;
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable)
+ reply_post_free = (long)
+ ioc->reply_post[++index].reply_post_free;
+ else
+ reply_post_free += reply_post_free_sz;
}
skip_init_reply_post_free_queue:
@@ -4272,6 +4468,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
}
}
+ ioc->rdpq_array_enable_assigned = 0;
+ ioc->dma_mask = 0;
r = mpt2sas_base_map_resources(ioc);
if (r)
goto out_free_resources;
@@ -4633,6 +4831,16 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
r = -EFAULT;
goto out;
}
+
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out;
+
+ if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
+ panic("%s: Issue occurred with flashing controller firmware. "
+ "Please reboot the system and ensure that the correct"
+ " firmware version is running\n", ioc->name);
+
r = _base_make_ioc_operational(ioc, sleep_flag);
if (!r)
_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 0ac5815a7f91..239f169b0673 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "16.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 16
+#define MPT2SAS_DRIVER_VERSION "18.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 18
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -355,6 +355,7 @@ struct _internal_cmd {
* @slot: slot number
* @phy: phy identifier provided in sas device page 0
* @responding: used in _scsih_sas_device_mark_responding
+ * @pfa_led_on: flag for PFA LED status
*/
struct _sas_device {
struct list_head list;
@@ -373,6 +374,7 @@ struct _sas_device {
u16 slot;
u8 phy;
u8 responding;
+ u8 pfa_led_on;
};
/**
@@ -634,6 +636,11 @@ struct mpt2sas_port_facts {
u16 MaxPostedCmdBuffers;
};
+struct reply_post_struct {
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+};
+
/**
* enum mutex_type - task management mutex type
* @TM_MUTEX_OFF: mutex is not required because the calling function is acquiring it
@@ -661,6 +668,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
* @ir_firmware: IR firmware present
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
+ * @dma_mask: used to set the consistent dma mask
* @fault_reset_work_q_name: fw fault work queue
* @fault_reset_work_q: ""
* @fault_reset_work: ""
@@ -777,8 +785,11 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
* @reply_free_dma_pool:
* @reply_free_host_index: tail index in pool to insert free replys
* @reply_post_queue_depth: reply post queue depth
- * @reply_post_free: pool for reply post (64bit descriptor)
- * @reply_post_free_dma:
+ * @reply_post_struct: struct for reply_post_free physical & virt address
+ * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
+ * @rdpq_array_enable: rdpq_array support is enabled in the driver
+ * @rdpq_array_enable_assigned: this ensures that the rdpq_array_enable flag
+ * is assigned only once
* @reply_queue_count: number of reply queues
* @reply_queue_list: linked list containing the reply queue info
* @reply_post_host_index: head index in the pool where FW completes IO
@@ -800,6 +811,7 @@ struct MPT2SAS_ADAPTER {
u8 ir_firmware;
int bars;
u8 mask_interrupts;
+ int dma_mask;
/* fw fault handler */
char fault_reset_work_q_name[20];
@@ -970,8 +982,10 @@ struct MPT2SAS_ADAPTER {
/* reply post queue */
u16 reply_post_queue_depth;
- Mpi2ReplyDescriptorsUnion_t *reply_post_free;
- dma_addr_t reply_post_free_dma;
+ struct reply_post_struct *reply_post;
+ u8 rdpq_array_capable;
+ u8 rdpq_array_enable;
+ u8 rdpq_array_enable_assigned;
struct dma_pool *reply_post_free_dma_pool;
u8 reply_queue_count;
struct list_head reply_queue_list;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 0c47425c73f2..c72a2fff5dbb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 62df8f9d4271..ca4e563c01dd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 8b2ac1869dcc..fa0567c96050 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index a9021cbd6628..cc57ef31d0fe 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index dd461015813f..c80ed0482649 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -55,6 +55,8 @@
#include <linux/raid_class.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
+
#include "mpt2sas_base.h"
MODULE_AUTHOR(MPT2SAS_AUTHOR);
@@ -145,7 +147,7 @@ struct sense_info {
};
-#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT2SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
@@ -3858,85 +3860,46 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
u16 smid)
{
- u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
+ sector_t v_lba, p_lba, stripe_off, column, io_size;
u32 stripe_sz, stripe_exp;
- u8 num_pds, *cdb_ptr, i;
- u8 cdb0 = scmd->cmnd[0];
- u64 v_llba;
+ u8 num_pds, cmd = scmd->cmnd[0];
- /*
- * Try Direct I/O to RAID memeber disks
- */
- if (cdb0 == READ_16 || cdb0 == READ_10 ||
- cdb0 == WRITE_16 || cdb0 == WRITE_10) {
- cdb_ptr = mpi_request->CDB.CDB32;
-
- if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
- | cdb_ptr[5])) {
- io_size = scsi_bufflen(scmd) >>
- raid_device->block_exponent;
- i = (cdb0 < READ_16) ? 2 : 6;
- /* get virtual lba */
- v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
-
- if (((u64)v_lba + (u64)io_size - 1) <=
- (u32)raid_device->max_lba) {
- stripe_sz = raid_device->stripe_sz;
- stripe_exp = raid_device->stripe_exponent;
- stripe_off = v_lba & (stripe_sz - 1);
-
- /* Check whether IO falls within a stripe */
- if ((stripe_off + io_size) <= stripe_sz) {
- num_pds = raid_device->num_pds;
- p_lba = v_lba >> stripe_exp;
- stripe_unit = p_lba / num_pds;
- column = p_lba % num_pds;
- p_lba = (stripe_unit << stripe_exp) +
- stripe_off;
- mpi_request->DevHandle =
- cpu_to_le16(raid_device->
- pd_handle[column]);
- (*(__be32 *)(&cdb_ptr[i])) =
- cpu_to_be32(p_lba);
- /*
- * WD: To indicate this I/O is directI/O
- */
- _scsih_scsi_direct_io_set(ioc, smid, 1);
- }
- }
- } else {
- io_size = scsi_bufflen(scmd) >>
- raid_device->block_exponent;
- /* get virtual lba */
- v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
-
- if ((v_llba + (u64)io_size - 1) <=
- raid_device->max_lba) {
- stripe_sz = raid_device->stripe_sz;
- stripe_exp = raid_device->stripe_exponent;
- stripe_off = (u32) (v_llba & (stripe_sz - 1));
-
- /* Check whether IO falls within a stripe */
- if ((stripe_off + io_size) <= stripe_sz) {
- num_pds = raid_device->num_pds;
- p_lba = (u32)(v_llba >> stripe_exp);
- stripe_unit = p_lba / num_pds;
- column = p_lba % num_pds;
- p_lba = (stripe_unit << stripe_exp) +
- stripe_off;
- mpi_request->DevHandle =
- cpu_to_le16(raid_device->
- pd_handle[column]);
- (*(__be64 *)(&cdb_ptr[2])) =
- cpu_to_be64((u64)p_lba);
- /*
- * WD: To indicate this I/O is directI/O
- */
- _scsih_scsi_direct_io_set(ioc, smid, 1);
- }
- }
- }
- }
+ if (cmd != READ_10 && cmd != WRITE_10 &&
+ cmd != READ_16 && cmd != WRITE_16)
+ return;
+
+ if (cmd == READ_10 || cmd == WRITE_10)
+ v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]);
+ else
+ v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]);
+
+ io_size = scsi_bufflen(scmd) >> raid_device->block_exponent;
+
+ if (v_lba + io_size - 1 > raid_device->max_lba)
+ return;
+
+ stripe_sz = raid_device->stripe_sz;
+ stripe_exp = raid_device->stripe_exponent;
+ stripe_off = v_lba & (stripe_sz - 1);
+
+ /* Return unless IO falls within a stripe */
+ if (stripe_off + io_size > stripe_sz)
+ return;
+
+ num_pds = raid_device->num_pds;
+ p_lba = v_lba >> stripe_exp;
+ column = sector_div(p_lba, num_pds);
+ p_lba = (p_lba << stripe_exp) + stripe_off;
+
+ mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]);
+
+ if (cmd == READ_10 || cmd == WRITE_10)
+ put_unaligned_be32(lower_32_bits(p_lba),
+ &mpi_request->CDB.CDB32[2]);
+ else
+ put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
+
+ _scsih_scsi_direct_io_set(ioc, smid, 1);
}
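/*
 * Worked example for the rewritten direct-I/O mapping above
 * (illustrative values only): with stripe_sz = 64 sectors
 * (stripe_exp = 6), num_pds = 3, and a READ_10 of io_size = 8 sectors
 * at v_lba = 200:
 *
 *	stripe_off = 200 & 63      = 8    (8 + 8 <= 64, fits one stripe)
 *	p_lba      = 200 >> 6      = 3
 *	column     = sector_div(p_lba, 3) = 0, and p_lba becomes 1
 *	p_lba      = (1 << 6) + 8  = 72
 *
 * so virtual LBA 200 is redirected to physical LBA 72 on the member
 * disk in column 0, and the CDB LBA field is rewritten with
 * put_unaligned_be32() before the request is marked as direct I/O.
 */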
/**
@@ -4308,7 +4271,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
#endif
/**
- * _scsih_turn_on_fault_led - illuminate Fault LED
+ * _scsih_turn_on_pfa_led - illuminate PFA LED
* @ioc: per adapter object
* @handle: device handle
* Context: process
@@ -4316,10 +4279,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* Return nothing.
*/
static void
-_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SepReply_t mpi_reply;
Mpi2SepRequest_t mpi_request;
+ struct _sas_device *sas_device;
+
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device)
+ return;
memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
@@ -4334,6 +4302,47 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
__FILE__, __LINE__, __func__);
return;
}
+ sas_device->pfa_led_on = 1;
+
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_turn_off_pfa_led - turn off PFA LED
+ * @ioc: per adapter object
+ * @sas_device: sas device whose PFA LED has to be turned off
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_off_pfa_led(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus = 0;
+ mpi_request.Slot = cpu_to_le16(sas_device->slot);
+ mpi_request.DevHandle = 0;
+ mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
+ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
@@ -4345,7 +4354,7 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
}
/**
- * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
@@ -4353,14 +4362,14 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
* Return nothing.
*/
static void
-_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_send_event_to_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
struct fw_event_work *fw_event;
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
if (!fw_event)
return;
- fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+ fw_event->event = MPT2SAS_TURN_ON_PFA_LED;
fw_event->device_handle = handle;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
@@ -4404,7 +4413,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
- _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+ _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -5325,6 +5334,12 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
{
struct MPT2SAS_TARGET *sas_target_priv_data;
+ if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+ (sas_device->pfa_led_on)) {
+ _scsih_turn_off_pfa_led(ioc, sas_device);
+ sas_device->pfa_led_on = 0;
+ }
+
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
"handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
sas_device->handle, (unsigned long long)
@@ -7441,8 +7456,8 @@ _firmware_event_work(struct work_struct *work)
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
"from worker thread\n", ioc->name));
break;
- case MPT2SAS_TURN_ON_FAULT_LED:
- _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ case MPT2SAS_TURN_ON_PFA_LED:
+ _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
break;
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
_scsih_sas_topology_change_event(ioc, fw_event);
@@ -8132,6 +8147,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct MPT2SAS_ADAPTER *ioc;
struct Scsi_Host *shost;
+ int rv;
shost = scsi_host_alloc(&scsih_driver_template,
sizeof(struct MPT2SAS_ADAPTER));
@@ -8227,6 +8243,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ioc->firmware_event_thread) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
goto out_thread_fail;
}
@@ -8234,6 +8251,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if ((mpt2sas_base_attach(ioc))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
goto out_attach_fail;
}
@@ -8251,7 +8269,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} else
ioc->hide_drives = 0;
- if ((scsi_add_host(shost, &pdev->dev))) {
+ rv = scsi_add_host(shost, &pdev->dev);
+ if (rv) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
goto out_add_shost_fail;
@@ -8268,7 +8287,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_thread_fail:
list_del(&ioc->list);
scsi_host_put(shost);
- return -ENODEV;
+ return rv;
}
#ifdef CONFIG_PM
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 410f4a3e8888..0d1d06488a28 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index d53e1b02e893..4d235dd741bf 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -2,7 +2,7 @@
# Kernel configuration file for the MPT3SAS
#
# This code is based on drivers/scsi/mpt3sas/Kconfig
-# Copyright (C) 2012-2013 LSI Corporation
+# Copyright (C) 2012-2014 LSI Corporation
# (mailto:DL-MPTFusionLinux@lsi.com)
# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 20da8f907c00..c34c1157907b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.29
+ * mpi2.h Version: 02.00.31
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -86,6 +86,8 @@
* 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
* 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -119,7 +121,7 @@
#define MPI2_VERSION_02_05 (0x0205)
/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x1D)
+#define MPI2_HEADER_VERSION_UNIT (0x1F)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 889aa7067899..e261a3153bb3 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.24
+ * mpi2_cnfg.h Version: 02.00.26
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -160,6 +160,11 @@
* 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
* obsolete for MPI v2.5 and later.
* Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
+ * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
+ * future use.
* --------------------------------------------------------------------------
*/
@@ -792,6 +797,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
/*IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
@@ -870,7 +876,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
/*defines for IO Unit Page 5 DmaEngineCapabilities field */
-#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000)
#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
@@ -920,11 +926,15 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
U8
BoardTemperatureUnits; /*0x16 */
U8 Reserved3; /*0x17 */
+ U32 Reserved4; /* 0x18 */
+ U32 Reserved5; /* 0x1C */
+ U32 Reserved6; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
} MPI2_CONFIG_PAGE_IO_UNIT_7,
*PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
-#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04)
/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index f7928bf66478..068c98efd742 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_init.h
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.14
+ * mpi2_init.h Version: 02.00.15
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -44,6 +44,8 @@
* Priority to match SAM-4.
* Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
* 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
* --------------------------------------------------------------------------
*/
@@ -347,7 +349,7 @@ typedef struct _MPI2_SCSI_IO_REPLY {
U32 SenseCount; /*0x18 */
U32 ResponseInfo; /*0x1C */
U16 TaskTag; /*0x20 */
- U16 Reserved4; /*0x22 */
+ U16 SCSIStatusQualifier; /* 0x22 */
U32 BidirectionalTransferCount; /*0x24 */
U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
U32 Reserved6; /*0x2C */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index e2bb82143720..490830957806 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.22
+ * mpi2_ioc.h Version: 02.00.23
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -127,6 +127,11 @@
* 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
* Added ElapsedSeconds field to
* MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
* --------------------------------------------------------------------------
*/
@@ -182,6 +187,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
#define MPI2_WHOINIT_HOST_DRIVER (0x04)
#define MPI2_WHOINIT_MANUFACTURER (0x05)
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
+
+
/*MsgVersion */
#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
@@ -194,9 +203,19 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
-/*minimum depth for the Reply Descriptor Post Queue */
+/*minimum depth for a Reply Descriptor Post Queue */
#define MPI2_RDPQ_DEPTH_MIN (16)
+/* Reply Descriptor Post Queue Array Entry */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+ U64 RDPQBaseAddress; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry;
+
+
/*IOCInit Reply message */
typedef struct _MPI2_IOC_INIT_REPLY {
U8 WhoInit; /*0x00 */
@@ -306,6 +325,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
@@ -1140,6 +1160,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
/*MPI v2.0 FWDownload TransactionContext Element */
@@ -1404,6 +1425,7 @@ typedef struct _MPI2_EXT_IMAGE_HEADER {
#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -1560,6 +1582,42 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER {
/*defines for the ResetVector field */
#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+ U8 HashImageType; /* 0x00 */
+ U8 HashAlgorithm; /* 0x01 */
+ U8 EncryptionAlgorithm; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 EncryptedHash[1]; /* 0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+ U8 ImageVersion; /* 0x00 */
+ U8 NumHash; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 Reserved2; /* 0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
+
+
+
/****************************************************************************
* PowerManagementControl message
****************************************************************************/
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index 71765236afef..13d93ca029d5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_raid.h
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.09
+ * mpi2_raid.h Version: 02.00.10
*
* Version History
* ---------------
@@ -30,6 +30,7 @@
* 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
* 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
* Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
* --------------------------------------------------------------------------
*/
@@ -46,6 +47,9 @@
* RAID Action messages
****************************************************************************/
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000)
+
/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index cba046f6a4b4..156e30543a2f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_sas.h
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2_sas.h Version: 02.00.07
+ * mpi2_sas.h Version: 02.00.08
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -30,6 +30,8 @@
* 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
* 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
* Passthrough Request message.
+ * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
+ * for anything newer than MPI v2.0.
* --------------------------------------------------------------------------
*/
@@ -251,7 +253,7 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST {
#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
-#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */
#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 34e9a7ba76b0..904910d8a737 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_tool.h
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.10
+ * mpi2_tool.h Version: 02.00.11
*
* Version History
* ---------------
@@ -32,6 +32,7 @@
* message.
* 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
* it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
* --------------------------------------------------------------------------
*/
@@ -51,6 +52,7 @@
#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
/****************************************************************************
* Toolbox reply
@@ -331,6 +333,45 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
Mpi2ToolboxDiagnosticCliReply_t,
*pMpi2ToolboxDiagnosticCliReply_t;
+
+/****************************************************************************
+* Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Console; /* 0x0C */
+ U8 Flags; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U8 TextToDisplay[4]; /* 0x10 */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+Mpi2ToolboxTextDisplayRequest_t,
+*pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
+
+
+
/*****************************************************************************
*
* Diagnostic Buffer Messages
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
index ba1fed50966e..99ab093602e8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
*
*
* Name: mpi2_type.h
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 93ce2b2baa41..1560115079c7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -91,6 +91,8 @@ static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
" enable detection of firmware fault and halt firmware - (default=0)");
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -1482,17 +1484,22 @@ static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
- char *desc = NULL;
+ u64 consistent_dma_mask;
+
+ if (ioc->dma_mask)
+ consistent_dma_mask = DMA_BIT_MASK(64);
+ else
+ consistent_dma_mask = DMA_BIT_MASK(32);
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask =
dma_get_required_mask(&pdev->dev);
if ((required_mask > DMA_BIT_MASK(32)) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- desc = "64";
+ ioc->dma_mask = 64;
goto out;
}
}
@@ -1501,19 +1508,30 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
ioc->base_add_sg_single = &_base_add_sg_single_32;
ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- desc = "32";
+ ioc->dma_mask = 32;
} else
return -ENODEV;
out:
si_meminfo(&s);
pr_info(MPT3SAS_FMT
- "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->name, desc, convert_to_kb(s.totalram));
+ "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
+static int
+_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
+ struct pci_dev *pdev)
+{
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+ }
+ return 0;
+}
+
/**
* _base_check_enable_msix - checks MSIX capable.
* @ioc: per adapter object
@@ -1698,11 +1716,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
ioc->cpu_count, max_msix_vectors);
+ if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
+ max_msix_vectors = 8;
+
if (max_msix_vectors > 0) {
ioc->reply_queue_count = min_t(int, max_msix_vectors,
ioc->reply_queue_count);
ioc->msix_vector_count = ioc->reply_queue_count;
- }
+ } else if (max_msix_vectors == 0)
+ goto try_ioapic;
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
GFP_KERNEL);
@@ -1716,10 +1738,10 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
a->entry = i;
- r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+ r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
if (r) {
dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_enable_msix failed (r=%d) !!!\n",
+ "pci_enable_msix_exact failed (r=%d) !!!\n",
ioc->name, r));
kfree(entries);
goto try_ioapic;
@@ -1742,6 +1764,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
/* failback to io_apic interrupt routing */
try_ioapic:
+ ioc->reply_queue_count = 1;
r = _base_request_irq(ioc, 0, ioc->pdev->irq);
return r;
@@ -1821,6 +1844,16 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
_base_mask_interrupts(ioc);
+
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_fail;
+
+ if (!ioc->rdpq_array_enable_assigned) {
+ ioc->rdpq_array_enable = ioc->rdpq_array_capable;
+ ioc->rdpq_array_enable_assigned = 1;
+ }
+
r = _base_enable_msix(ioc);
if (r)
goto out_fail;
@@ -2185,6 +2218,53 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
&ioc->scsi_lookup_lock);
}
+/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (ioc->pdev->device) {
+ case MPI25_MFGPAGE_DEVID_SAS3008:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT3SAS_INTEL_RMS3JC080_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RMS3JC080_BRANDING);
+ break;
+
+ case MPT3SAS_INTEL_RS3GC008_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RS3GC008_BRANDING);
+ break;
+ case MPT3SAS_INTEL_RS3FC044_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RS3FC044_BRANDING);
+ break;
+ case MPT3SAS_INTEL_RS3UC080_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RS3UC080_BRANDING);
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->name, ioc->pdev->subsystem_device);
+ break;
+ }
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->name, ioc->pdev->subsystem_device);
+ break;
+ }
+}
+
/**
@@ -2216,6 +2296,8 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
(bios_version & 0x0000FF00) >> 8,
bios_version & 0x000000FF);
+ _base_display_intel_branding(ioc);
+
pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -2447,7 +2529,8 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
- int i;
+ int i = 0;
+ struct reply_post_struct *rps;
dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -2492,15 +2575,25 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free = NULL;
}
- if (ioc->reply_post_free) {
- pci_pool_free(ioc->reply_post_free_dma_pool,
- ioc->reply_post_free, ioc->reply_post_free_dma);
+ if (ioc->reply_post) {
+ do {
+ rps = &ioc->reply_post[i];
+ if (rps->reply_post_free) {
+ pci_pool_free(
+ ioc->reply_post_free_dma_pool,
+ rps->reply_post_free,
+ rps->reply_post_free_dma);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->name, rps->reply_post_free));
+ rps->reply_post_free = NULL;
+ }
+ } while (ioc->rdpq_array_enable &&
+ (++i < ioc->reply_queue_count));
+
if (ioc->reply_post_free_dma_pool)
pci_pool_destroy(ioc->reply_post_free_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_pool(0x%p): free\n", ioc->name,
- ioc->reply_post_free));
- ioc->reply_post_free = NULL;
+ kfree(ioc->reply_post);
}
if (ioc->config_page) {
@@ -2647,6 +2740,65 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
ioc->chains_needed_per_io));
+ /* reply post queue, 16 byte align */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+
+ sz = reply_post_free_sz;
+ if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+ sz *= ioc->reply_queue_count;
+
+ ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
+ (ioc->reply_queue_count):1,
+ sizeof(struct reply_post_struct), GFP_KERNEL);
+
+ if (!ioc->reply_post) {
+ pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ i = 0;
+ do {
+ ioc->reply_post[i].reply_post_free =
+ pci_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_post[i].reply_post_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply post free pool (0x%p): depth(%d),"
+ "element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post_queue_depth, 8, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_dma = (0x%llx)\n", ioc->name,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ total_sz += sz;
+ } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
+ if (ioc->dma_mask == 64) {
+ if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+ pr_warn(MPT3SAS_FMT
+ "no suitable consistent DMA mask for %s\n",
+ ioc->name, pci_name(ioc->pdev));
+ goto out;
+ }
+ }
+
ioc->scsiio_depth = ioc->hba_queue_depth -
ioc->hi_priority_depth - ioc->internal_depth;
@@ -2861,40 +3013,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long)ioc->reply_free_dma));
total_sz += sz;
- /* reply post queue, 16 byte align */
- reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(Mpi2DefaultReplyDescriptor_t);
- if (_base_is_controller_msix_enabled(ioc))
- sz = reply_post_free_sz * ioc->reply_queue_count;
- else
- sz = reply_post_free_sz;
- ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
- ioc->pdev, sz, 16, 0);
- if (!ioc->reply_post_free_dma_pool) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: pci_pool_create failed\n",
- ioc->name);
- goto out;
- }
- ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
- GFP_KERNEL, &ioc->reply_post_free_dma);
- if (!ioc->reply_post_free) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: pci_pool_alloc failed\n",
- ioc->name);
- goto out;
- }
- memset(ioc->reply_post_free, 0, sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
- "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
- sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_dma = (0x%llx)\n",
- ioc->name, (unsigned long long)
- ioc->reply_post_free_dma));
- total_sz += sz;
-
ioc->config_page_sz = 512;
ioc->config_page = pci_alloc_consistent(ioc->pdev,
ioc->config_page_sz, &ioc->config_page_dma);
@@ -3577,6 +3695,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
ioc->ir_firmware = 1;
+ if ((facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+ ioc->rdpq_array_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -3613,9 +3734,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
Mpi2IOCInitRequest_t mpi_request;
Mpi2IOCInitReply_t mpi_reply;
- int r;
+ int i, r = 0;
struct timeval current_time;
u16 ioc_status;
+ u32 reply_post_free_array_sz = 0;
+ Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
+ dma_addr_t reply_post_free_array_dma;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -3644,9 +3768,31 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
cpu_to_le64((u64)ioc->request_dma);
mpi_request.ReplyFreeQueueAddress =
cpu_to_le64((u64)ioc->reply_free_dma);
- mpi_request.ReplyDescriptorPostQueueAddress =
- cpu_to_le64((u64)ioc->reply_post_free_dma);
+ if (ioc->rdpq_array_enable) {
+ reply_post_free_array_sz = ioc->reply_queue_count *
+ sizeof(Mpi2IOCInitRDPQArrayEntry);
+ reply_post_free_array = pci_alloc_consistent(ioc->pdev,
+ reply_post_free_array_sz, &reply_post_free_array_dma);
+ if (!reply_post_free_array) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free_array: pci_alloc_consistent failed\n",
+ ioc->name);
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(reply_post_free_array, 0, reply_post_free_array_sz);
+ for (i = 0; i < ioc->reply_queue_count; i++)
+ reply_post_free_array[i].RDPQBaseAddress =
+ cpu_to_le64(
+ (u64)ioc->reply_post[i].reply_post_free_dma);
+ mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)reply_post_free_array_dma);
+ } else {
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
+ }
/* This time stamp specifies number of milliseconds
* since epoch ~ midnight January 1, 1970.
@@ -3674,7 +3820,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
if (r != 0) {
pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
ioc->name, __func__, r);
- return r;
+ goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -3684,7 +3830,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
r = -EIO;
}
- return 0;
+out:
+ if (reply_post_free_array)
+ pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+ reply_post_free_array,
+ reply_post_free_array_dma);
+ return r;
}
/**
@@ -4234,7 +4385,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
struct _tr_list *delayed_tr, *delayed_tr_next;
struct adapter_reply_queue *reply_q;
long reply_post_free;
- u32 reply_post_free_sz;
+ u32 reply_post_free_sz, index = 0;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -4305,9 +4456,9 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
_base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
- reply_post_free = (long)ioc->reply_post_free;
reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t);
+ reply_post_free = (long)ioc->reply_post[index].reply_post_free;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
reply_q->reply_post_host_index = 0;
reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
@@ -4317,7 +4468,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
cpu_to_le64(ULLONG_MAX);
if (!_base_is_controller_msix_enabled(ioc))
goto skip_init_reply_post_free_queue;
- reply_post_free += reply_post_free_sz;
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable)
+ reply_post_free = (long)
+ ioc->reply_post[++index].reply_post_free;
+ else
+ reply_post_free += reply_post_free_sz;
}
skip_init_reply_post_free_queue:
@@ -4428,6 +4587,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
}
+ ioc->rdpq_array_enable_assigned = 0;
+ ioc->dma_mask = 0;
r = mpt3sas_base_map_resources(ioc);
if (r)
goto out_free_resources;
@@ -4804,6 +4965,12 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
if (r)
goto out;
+
+ if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
+ panic("%s: Issue occurred with flashing controller firmware."
+ "Please reboot the system and ensure that the correct"
+ " firmware version is running\n", ioc->name);
+
r = _base_make_ioc_operational(ioc, sleep_flag);
if (!r)
_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 9b90a6fef706..40926aa9b24d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -70,8 +70,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "02.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 2
+#define MPT3SAS_DRIVER_VERSION "04.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 4
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -130,7 +130,25 @@
#define MPT_TARGET_FLAGS_DELETED 0x04
#define MPT_TARGET_FASTPATH_IO 0x08
+/*
+ * Intel HBA branding
+ */
+#define MPT3SAS_INTEL_RMS3JC080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS3JC080"
+#define MPT3SAS_INTEL_RS3GC008_BRANDING \
+ "Intel(R) RAID Controller RS3GC008"
+#define MPT3SAS_INTEL_RS3FC044_BRANDING \
+ "Intel(R) RAID Controller RS3FC044"
+#define MPT3SAS_INTEL_RS3UC080_BRANDING \
+ "Intel(R) RAID Controller RS3UC080"
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521
+#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522
+#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523
+#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524
/*
* status bits for ioc->diag_buffer_status
@@ -272,8 +290,10 @@ struct _internal_cmd {
* @channel: target channel
* @slot: number number
* @phy: phy identifier provided in sas device page 0
- * @fast_path: fast path feature enable bit
* @responding: used in _scsih_sas_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @pfa_led_on: flag for PFA LED status
+ *
*/
struct _sas_device {
struct list_head list;
@@ -293,6 +313,7 @@ struct _sas_device {
u8 phy;
u8 responding;
u8 fast_path;
+ u8 pfa_led_on;
};
/**
@@ -548,6 +569,11 @@ struct mpt3sas_port_facts {
u16 MaxPostedCmdBuffers;
};
+struct reply_post_struct {
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+};
+
/**
* enum mutex_type - task management mutex type
* @TM_MUTEX_OFF: mutex is not required because calling function is acquiring it
@@ -576,6 +602,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ir_firmware: IR firmware present
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
+ * @dma_mask: used to set the consistent dma mask
* @fault_reset_work_q_name: fw fault work queue
* @fault_reset_work_q: ""
* @fault_reset_work: ""
@@ -691,8 +718,11 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @reply_free_dma_pool:
* @reply_free_host_index: tail index in pool to insert free replies
* @reply_post_queue_depth: reply post queue depth
- * @reply_post_free: pool for reply post (64bit descriptor)
- * @reply_post_free_dma:
+ * @reply_post: struct for reply_post_free physical & virt address
+ * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
+ * @rdpq_array_enable: rdpq_array support is enabled in the driver
+ * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
+ * is assigned only once
* @reply_queue_count: number of reply queue's
* @reply_queue_list: linked list containing the reply queue info
* @reply_post_host_index: head index in the pool where FW completes IO
@@ -714,6 +744,7 @@ struct MPT3SAS_ADAPTER {
u8 ir_firmware;
int bars;
u8 mask_interrupts;
+ int dma_mask;
/* fw fault handler */
char fault_reset_work_q_name[20];
@@ -893,8 +924,10 @@ struct MPT3SAS_ADAPTER {
/* reply post queue */
u16 reply_post_queue_depth;
- Mpi2ReplyDescriptorsUnion_t *reply_post_free;
- dma_addr_t reply_post_free_dma;
+ struct reply_post_struct *reply_post;
+ u8 rdpq_array_capable;
+ u8 rdpq_array_enable;
+ u8 rdpq_array_enable_assigned;
struct dma_pool *reply_post_free_dma_pool;
u8 reply_queue_count;
struct list_head reply_queue_list;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 936ec0391990..4472c2af9255 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index ba9cbe598a91..dca14877d5ab 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 53b0c480d98f..5f3d7fd7c2f8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
index 545b22d2cbdf..4778e7dd98bd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_debug.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 135f12c20ecf..857276b8880f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -159,7 +159,7 @@ struct sense_info {
};
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
-#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
@@ -3885,7 +3885,7 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
#endif
/**
- * _scsih_turn_on_fault_led - illuminate Fault LED
+ * _scsih_turn_on_pfa_led - illuminate PFA LED
* @ioc: per adapter object
* @handle: device handle
* Context: process
@@ -3893,10 +3893,15 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* Return nothing.
*/
static void
-_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SepReply_t mpi_reply;
Mpi2SepRequest_t mpi_request;
+ struct _sas_device *sas_device;
+
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device)
+ return;
memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
@@ -3911,6 +3916,7 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
__FILE__, __LINE__, __func__);
return;
}
+ sas_device->pfa_led_on = 1;
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3920,9 +3926,46 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
return;
}
}
+/**
+ * _scsih_turn_off_pfa_led - turn off PFA LED
+ * @ioc: per adapter object
+ * @sas_device: sas device whose PFA LED has to be turned off
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus = 0;
+ mpi_request.Slot = cpu_to_le16(sas_device->slot);
+ mpi_request.DevHandle = 0;
+ mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
+ if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
/**
- * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
@@ -3930,14 +3973,14 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* Return nothing.
*/
static void
-_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct fw_event_work *fw_event;
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
if (!fw_event)
return;
- fw_event->event = MPT3SAS_TURN_ON_FAULT_LED;
+ fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
fw_event->device_handle = handle;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
@@ -3981,7 +4024,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
- _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+ _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -4911,7 +4954,11 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
{
struct MPT3SAS_TARGET *sas_target_priv_data;
-
+ if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+ (sas_device->pfa_led_on)) {
+ _scsih_turn_off_pfa_led(ioc, sas_device);
+ sas_device->pfa_led_on = 0;
+ }
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, __func__,
@@ -7065,8 +7112,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
"port enable: complete from worker thread\n",
ioc->name));
break;
- case MPT3SAS_TURN_ON_FAULT_LED:
- _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ case MPT3SAS_TURN_ON_PFA_LED:
+ _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
break;
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
_scsih_sas_topology_change_event(ioc, fw_event);
@@ -7734,6 +7781,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct MPT3SAS_ADAPTER *ioc;
struct Scsi_Host *shost;
+ int rv;
shost = scsi_host_alloc(&scsih_driver_template,
sizeof(struct MPT3SAS_ADAPTER));
@@ -7826,6 +7874,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ioc->firmware_event_thread) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
goto out_thread_fail;
}
@@ -7833,12 +7882,13 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if ((mpt3sas_base_attach(ioc))) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
goto out_attach_fail;
}
- if ((scsi_add_host(shost, &pdev->dev))) {
+ rv = scsi_add_host(shost, &pdev->dev);
+ if (rv) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- list_del(&ioc->list);
goto out_add_shost_fail;
}
@@ -7851,7 +7901,7 @@ out_add_shost_fail:
out_thread_fail:
list_del(&ioc->list);
scsi_host_put(shost);
- return -ENODEV;
+ return rv;
}
#ifdef CONFIG_PM
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 65170cb1a00f..d4bafaaebea9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index f6533ab20364..8a2dd113f401 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -3,7 +3,7 @@
* (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
index bb693923bef1..f681db56c53b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -4,7 +4,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 50b086aef178..90abb03c9074 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -915,7 +915,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
int ret;
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
- "enter. target: 0x%x LUN: 0x%llu cmnd: 0x%x cmndlen: 0x%x "
+ "enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
@@ -930,7 +930,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
/* check target ID is not same as this initiator ID */
if (scmd_id(SCpnt) == SCpnt->device->host->this_id) {
- nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "terget==host???");
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "target==host???");
SCpnt->result = DID_BAD_TARGET << 16;
done(SCpnt);
return 0;
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 340ceff03823..34aad32829f5 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -533,7 +533,7 @@ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask,
} while ((--time_out != 0) && (reg & mask) != 0);
if (time_out == 0) {
- nsp_msg(KERN_DEBUG, " %s signal off timeut", str);
+ nsp_msg(KERN_DEBUG, " %s signal off timeout", str);
}
return 0;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7abbf284da1a..be8269c8d127 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -385,7 +385,6 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
char *str = buf;
- void *virt_addr;
int bios_index;
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
@@ -402,11 +401,10 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
return -ENOMEM;
}
wait_for_completion(&completion);
- virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
bios_index++)
str += sprintf(str, "%c",
- *((u8 *)((u8 *)virt_addr+bios_index)));
+ *(payload.func_specific+bios_index));
kfree(payload.func_specific);
return str - buf;
}
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index dd12c6fe57a6..933f21471951 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3132,6 +3132,7 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
void
pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
+ struct fw_control_ex *fw_control_context;
struct get_nvm_data_resp *pPayload =
(struct get_nvm_data_resp *)(piomb + 4);
u32 tag = le32_to_cpu(pPayload->tag);
@@ -3140,6 +3141,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 ir_tds_bn_dps_das_nvm =
le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+ fw_control_context = ccb->fw_control_context;
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
if ((dlen_status & NVMD_STAT) != 0) {
@@ -3180,6 +3182,12 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
(dlen_status & NVMD_LEN) >> 24));
}
+ /* Though fw_control_context is freed below, usrAddr still needs
+ * to be updated, as it holds the response for the requesting function
+ */
+ memcpy(fw_control_context->usrAddr,
+ pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+ fw_control_context->len);
kfree(ccb->fw_control_context);
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 6f3275d020a0..bcb64eb1387f 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4698,19 +4698,10 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
entries[i].entry = i;
- rc = pci_enable_msix(pdev, entries, num_hrrq);
- if (rc < 0)
+ num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
+ if (num_hrrq < 0)
goto pmcraid_isr_legacy;
- /* Check how many MSIX vectors are allocated and register
- * msi-x handlers for each of them giving appropriate buffer
- */
- if (rc > 0) {
- num_hrrq = rc;
- if (pci_enable_msix(pdev, entries, num_hrrq))
- goto pmcraid_isr_legacy;
- }
-
for (i = 0; i < num_hrrq; i++) {
pinstance->hrrq_vector[i].hrrq_id = i;
pinstance->hrrq_vector[i].drv_inst = pinstance;
@@ -4746,7 +4737,6 @@ pmcraid_isr_legacy:
pinstance->hrrq_vector[0].drv_inst = pinstance;
pinstance->hrrq_vector[0].vector = pdev->irq;
pinstance->num_hrrq = 1;
- rc = 0;
rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 16fe5196e6d9..82b92c414a9c 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -484,7 +484,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
- || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
+ || IS_QLA27XX(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
@@ -987,6 +988,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
continue;
if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
+ if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+ continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -1014,7 +1017,7 @@ qla2x00_fw_version_show(struct device *dev,
char fw_str[128];
return scnprintf(buf, PAGE_SIZE, "%s\n",
- ha->isp_ops->fw_version_str(vha, fw_str));
+ ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
}
static ssize_t
@@ -1440,7 +1443,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int rval = QLA_FUNCTION_FAILED;
- uint16_t state[5];
+ uint16_t state[6];
uint32_t pstate;
if (IS_QLAFX00(vha->hw)) {
@@ -1456,8 +1459,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
if (rval != QLA_SUCCESS)
memset(state, -1, sizeof(state));
- return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
- state[1], state[2], state[3], state[4]);
+ return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ state[0], state[1], state[2], state[3], state[4], state[5]);
}
static ssize_t
@@ -1924,7 +1927,8 @@ qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
- qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
+ qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)));
}
static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 524f9eb7fcd1..2e2bb6f45ce6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1390,7 +1390,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7058,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c72ee97bf3f7..d77fe43793b6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,19 +11,15 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x017d | 0x004b,0x0141 |
- * | | | 0x0144,0x0146 |
+ * | Module Init and Probe | 0x017d | 0x0144,0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
- * | Mailbox commands | 0x118d | 0x1018-0x1019 |
- * | | | 0x10ca |
- * | | | 0x1115-0x1116 |
- * | | | 0x111a-0x111b |
- * | | | 0x1155-0x1158 |
- * | Device Discovery | 0x2095 | 0x2020-0x2022, |
+ * | Mailbox commands | 0x118d | 0x1115-0x1116 |
+ * | | | 0x111a-0x111b |
+ * | Device Discovery | 0x2016 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
- * | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
+ * | | | 0x2099-0x20a4 |
+ * | Queue Command and IO tracing | 0x3059 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
@@ -31,10 +27,10 @@
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
* | Async Events | 0x5087 | 0x502b-0x502f |
- * | | | 0x5047,0x5052 |
+ * | | | 0x5047 |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
- * | | | 0x507b |
+ * | | | 0x507b,0x505f |
* | Timer Routines | 0x6012 | |
* | User Space Interactions | 0x70e2 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
@@ -64,13 +60,15 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd212 | 0xd017-0xd019 |
- * | | | 0xd020 |
- * | | | 0xd030-0xd0ff |
+ * | Misc | 0xd213 | 0xd011-0xd017 |
+ * | | | 0xd021,0xd024 |
+ * | | | 0xd025,0xd029 |
+ * | | | 0xd02a,0xd02e |
+ * | | | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
- * | | | 0xd213-0xd2fe |
- * | Target Mode | 0xe078 | |
- * | Target Mode Management | 0xf072 | 0xf002-0xf003 |
+ * | | | 0xd214-0xd2fe |
+ * | Target Mode | 0xe079 | |
+ * | Target Mode Management | 0xf072 | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b64399153135..5f6b2960cccb 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -191,6 +191,11 @@
* reset-recovery completion is
* second
*/
+/* ISP2031: Values for laser on/off */
+#define PORT_0_2031 0x00201340
+#define PORT_1_2031 0x00201350
+#define LASER_ON_2031 0x01800100
+#define LASER_OFF_2031 0x01800180
/*
* The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
@@ -261,6 +266,7 @@
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
+#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
@@ -803,6 +809,7 @@ struct mbx_cmd_32 {
#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
+#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
@@ -948,6 +955,7 @@ struct mbx_cmd_32 {
#define MBC_WRITE_SFP 0x30 /* Write SFP Data. */
#define MBC_READ_SFP 0x31 /* Read SFP Data. */
#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */
+#define MBC_DPORT_DIAGNOSTICS 0x47 /* D-Port Diagnostics */
#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */
#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */
#define MBC_MID_GET_VP_ENTRY 0x4a /* MID Get VP Entry. */
@@ -2016,6 +2024,8 @@ typedef struct fc_port {
unsigned long last_ramp_up;
uint16_t port_id;
+
+ unsigned long retry_delay_timestamp;
} fc_port_t;
#include "qla_mr.h"
@@ -2056,10 +2066,21 @@ static const char * const port_state_str[] = {
#define CT_REJECT_RESPONSE 0x8001
#define CT_ACCEPT_RESPONSE 0x8002
-#define CT_REASON_INVALID_COMMAND_CODE 0x01
-#define CT_REASON_CANNOT_PERFORM 0x09
-#define CT_REASON_COMMAND_UNSUPPORTED 0x0b
-#define CT_EXPL_ALREADY_REGISTERED 0x10
+#define CT_REASON_INVALID_COMMAND_CODE 0x01
+#define CT_REASON_CANNOT_PERFORM 0x09
+#define CT_REASON_COMMAND_UNSUPPORTED 0x0b
+#define CT_EXPL_ALREADY_REGISTERED 0x10
+#define CT_EXPL_HBA_ATTR_NOT_REGISTERED 0x11
+#define CT_EXPL_MULTIPLE_HBA_ATTR 0x12
+#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH 0x13
+#define CT_EXPL_MISSING_REQ_HBA_ATTR 0x14
+#define CT_EXPL_PORT_NOT_REGISTERED_ 0x15
+#define CT_EXPL_MISSING_HBA_ID_PORT_LIST 0x16
+#define CT_EXPL_HBA_NOT_REGISTERED 0x17
+#define CT_EXPL_PORT_ATTR_NOT_REGISTERED 0x20
+#define CT_EXPL_PORT_NOT_REGISTERED 0x21
+#define CT_EXPL_MULTIPLE_PORT_ATTR 0x22
+#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH 0x23
#define NS_N_PORT_TYPE 0x01
#define NS_NL_PORT_TYPE 0x02
@@ -2116,33 +2137,40 @@ static const char * const port_state_str[] = {
* HBA attribute types.
*/
#define FDMI_HBA_ATTR_COUNT 9
-#define FDMI_HBA_NODE_NAME 1
-#define FDMI_HBA_MANUFACTURER 2
-#define FDMI_HBA_SERIAL_NUMBER 3
-#define FDMI_HBA_MODEL 4
-#define FDMI_HBA_MODEL_DESCRIPTION 5
-#define FDMI_HBA_HARDWARE_VERSION 6
-#define FDMI_HBA_DRIVER_VERSION 7
-#define FDMI_HBA_OPTION_ROM_VERSION 8
-#define FDMI_HBA_FIRMWARE_VERSION 9
+#define FDMIV2_HBA_ATTR_COUNT 17
+#define FDMI_HBA_NODE_NAME 0x1
+#define FDMI_HBA_MANUFACTURER 0x2
+#define FDMI_HBA_SERIAL_NUMBER 0x3
+#define FDMI_HBA_MODEL 0x4
+#define FDMI_HBA_MODEL_DESCRIPTION 0x5
+#define FDMI_HBA_HARDWARE_VERSION 0x6
+#define FDMI_HBA_DRIVER_VERSION 0x7
+#define FDMI_HBA_OPTION_ROM_VERSION 0x8
+#define FDMI_HBA_FIRMWARE_VERSION 0x9
#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
+#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc
+#define FDMI_HBA_VENDOR_ID 0xd
+#define FDMI_HBA_NUM_PORTS 0xe
+#define FDMI_HBA_FABRIC_NAME 0xf
+#define FDMI_HBA_BOOT_BIOS_NAME 0x10
+#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
uint16_t type;
uint16_t len;
union {
uint8_t node_name[WWN_SIZE];
- uint8_t manufacturer[32];
- uint8_t serial_num[8];
+ uint8_t manufacturer[64];
+ uint8_t serial_num[32];
uint8_t model[16];
uint8_t model_desc[80];
- uint8_t hw_version[16];
+ uint8_t hw_version[32];
uint8_t driver_version[32];
uint8_t orom_version[16];
- uint8_t fw_version[16];
+ uint8_t fw_version[32];
uint8_t os_version[128];
- uint8_t max_ct_len[4];
+ uint32_t max_ct_len;
} a;
};
@@ -2151,16 +2179,56 @@ struct ct_fdmi_hba_attributes {
struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
};
+struct ct_fdmiv2_hba_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t node_name[WWN_SIZE];
+ uint8_t manufacturer[32];
+ uint8_t serial_num[32];
+ uint8_t model[16];
+ uint8_t model_desc[80];
+ uint8_t hw_version[16];
+ uint8_t driver_version[32];
+ uint8_t orom_version[16];
+ uint8_t fw_version[32];
+ uint8_t os_version[128];
+ uint32_t max_ct_len;
+ uint8_t sym_name[256];
+ uint32_t vendor_id;
+ uint32_t num_ports;
+ uint8_t fabric_name[WWN_SIZE];
+ uint8_t bios_name[32];
+ uint8_t vendor_indentifer[8];
+ } a;
+};
+
+struct ct_fdmiv2_hba_attributes {
+ uint32_t count;
+ struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+};
+
/*
* Port attribute types.
*/
#define FDMI_PORT_ATTR_COUNT 6
-#define FDMI_PORT_FC4_TYPES 1
-#define FDMI_PORT_SUPPORT_SPEED 2
-#define FDMI_PORT_CURRENT_SPEED 3
-#define FDMI_PORT_MAX_FRAME_SIZE 4
-#define FDMI_PORT_OS_DEVICE_NAME 5
-#define FDMI_PORT_HOST_NAME 6
+#define FDMIV2_PORT_ATTR_COUNT 16
+#define FDMI_PORT_FC4_TYPES 0x1
+#define FDMI_PORT_SUPPORT_SPEED 0x2
+#define FDMI_PORT_CURRENT_SPEED 0x3
+#define FDMI_PORT_MAX_FRAME_SIZE 0x4
+#define FDMI_PORT_OS_DEVICE_NAME 0x5
+#define FDMI_PORT_HOST_NAME 0x6
+#define FDMI_PORT_NODE_NAME 0x7
+#define FDMI_PORT_NAME 0x8
+#define FDMI_PORT_SYM_NAME 0x9
+#define FDMI_PORT_TYPE 0xa
+#define FDMI_PORT_SUPP_COS 0xb
+#define FDMI_PORT_FABRIC_NAME 0xc
+#define FDMI_PORT_FC4_TYPE 0xd
+#define FDMI_PORT_STATE 0x101
+#define FDMI_PORT_COUNT 0x102
+#define FDMI_PORT_ID 0x103
#define FDMI_PORT_SPEED_1GB 0x1
#define FDMI_PORT_SPEED_2GB 0x2
@@ -2171,7 +2239,11 @@ struct ct_fdmi_hba_attributes {
#define FDMI_PORT_SPEED_32GB 0x40
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
-struct ct_fdmi_port_attr {
+#define FC_CLASS_2 0x04
+#define FC_CLASS_3 0x08
+#define FC_CLASS_2_3 0x0C
+
+struct ct_fdmiv2_port_attr {
uint16_t type;
uint16_t len;
union {
@@ -2181,12 +2253,40 @@ struct ct_fdmi_port_attr {
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[32];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ uint8_t port_sym_name[128];
+ uint32_t port_type;
+ uint32_t port_supported_cos;
+ uint8_t fabric_name[WWN_SIZE];
+ uint8_t port_fc4_type[32];
+ uint32_t port_state;
+ uint32_t num_ports;
+ uint32_t port_id;
} a;
};
/*
* Port Attribute Block.
*/
+struct ct_fdmiv2_port_attributes {
+ uint32_t count;
+ struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
+};
+
+struct ct_fdmi_port_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t fc4_types[32];
+ uint32_t sup_speed;
+ uint32_t cur_speed;
+ uint32_t max_frame_size;
+ uint8_t os_dev_name[32];
+ uint8_t host_name[32];
+ } a;
+};
+
struct ct_fdmi_port_attributes {
uint32_t count;
struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
@@ -2286,6 +2386,13 @@ struct ct_sns_req {
struct {
uint8_t hba_identifier[8];
+ uint32_t entry_count;
+ uint8_t port_name[8];
+ struct ct_fdmiv2_hba_attributes attrs;
+ } rhba2;
+
+ struct {
+ uint8_t hba_identifier[8];
struct ct_fdmi_hba_attributes attrs;
} rhat;
@@ -2296,6 +2403,11 @@ struct ct_sns_req {
struct {
uint8_t port_name[8];
+ struct ct_fdmiv2_port_attributes attrs;
+ } rpa2;
+
+ struct {
+ uint8_t port_name[8];
} dhba;
struct {
@@ -2522,7 +2634,7 @@ struct isp_operations {
int (*load_risc) (struct scsi_qla_host *, uint32_t *);
char * (*pci_info_str) (struct scsi_qla_host *, char *);
- char * (*fw_version_str) (struct scsi_qla_host *, char *);
+ char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
irq_handler_t intr_handler;
void (*enable_intrs) (struct qla_hw_data *);
@@ -2664,6 +2776,9 @@ struct qla_statistics {
uint32_t control_requests;
uint64_t jiffies_at_last_reset;
+ uint32_t stat_max_pend_cmds;
+ uint32_t stat_max_qfull_cmds_alloc;
+ uint32_t stat_max_qfull_cmds_dropped;
};
struct bidi_statistics {
@@ -2786,8 +2901,22 @@ struct qlt_hw_data {
uint8_t saved_add_firmware_options[2];
uint8_t tgt_node_name[WWN_SIZE];
+
+ struct list_head q_full_list;
+ uint32_t num_pend_cmds;
+ uint32_t num_qfull_cmds_alloc;
+ uint32_t num_qfull_cmds_dropped;
+ spinlock_t q_full_lock;
+ uint32_t leak_exchg_thresh_hold;
};
+#define MAX_QFULL_CMDS_ALLOC 8192
+#define Q_FULL_THRESH_HOLD_PERCENT 90
+#define Q_FULL_THRESH_HOLD(ha) \
+ ((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
+
+#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -2834,7 +2963,8 @@ struct qla_hw_data {
uint32_t mr_reset_hdlr_active:1;
uint32_t mr_intr_valid:1;
- /* 34 bits */
+ uint32_t fawwpn_enabled:1;
+ /* 35 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -3032,6 +3162,7 @@ struct qla_hw_data {
#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
+#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3333,6 +3464,7 @@ struct qla_hw_data {
struct work_struct board_disable;
struct mr_data_fx00 mr;
+ uint32_t chip_reset;
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
@@ -3402,6 +3534,11 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
+ unsigned long pci_flags;
+#define PFLG_DISCONNECTED 0 /* PCI device removed */
+#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
+#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */
+
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
#define DFLG_NO_CABLE BIT_1
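
qla_def.h above adds queue-full accounting for target mode; Q_FULL_THRESH_HOLD() derives the trip point from the firmware exchange count with plain integer arithmetic. A standalone sketch of that calculation (the exchange count is an example value, not taken from any adapter):

#include <stdio.h>

#define Q_FULL_THRESH_HOLD_PERCENT 90

int main(void)
{
	unsigned int fw_xcb_count = 2048;	/* example value only */
	/* Divide first, then scale, exactly as the macro does; integer
	 * division rounds the threshold down slightly (here 1800). */
	unsigned int thresh =
		(fw_xcb_count / 100) * Q_FULL_THRESH_HOLD_PERCENT;

	printf("queue-full threshold: %u of %u exchanges\n",
	       thresh, fw_xcb_count);
	return 0;
}
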
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index eb8f57249f1d..42bb357bf56b 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -91,7 +91,7 @@ struct nvram_24xx {
/* Firmware Initialization Control Block. */
uint16_t version;
uint16_t reserved_1;
- uint16_t frame_payload_size;
+ __le16 frame_payload_size;
uint16_t execution_throttle;
uint16_t exchange_count;
uint16_t hard_address;
@@ -317,8 +317,8 @@ struct init_cb_24xx {
* BIT 3 = Reserved
* BIT 4 = Enable Target Mode
* BIT 5 = Disable Initiator Mode
- * BIT 6 = Reserved
- * BIT 7 = Reserved
+ * BIT 6 = Acquire FA-WWN
+ * BIT 7 = Enable D-port Diagnostics
*
* BIT 8 = Reserved
* BIT 9 = Non Participating LIP
@@ -567,7 +567,7 @@ struct sts_entry_24xx {
#define SF_TRANSFERRED_DATA BIT_11
#define SF_FCP_RSP_DMA BIT_0
- uint16_t reserved_2;
+ uint16_t retry_delay;
uint16_t scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
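
Marking frame_payload_size as __le16 documents that the firmware stores the field little-endian and lets static checkers such as sparse flag any access that skips the byte-order helpers. A kernel-style sketch of the intended access pattern (struct and function names are illustrative, and this fragment only builds inside a kernel tree):

struct example_icb {
	__le16 frame_payload_size;	/* little-endian on the wire */
};

static u16 example_payload_size(const struct example_icb *icb)
{
	/* le16_to_cpu() is a no-op on little-endian hosts and a byte
	 * swap on big-endian ones; either way sparse stays quiet. */
	return le16_to_cpu(icb->frame_payload_size);
}
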
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d646540db3ac..b1865a72ce59 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,6 +72,7 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
extern fc_port_t *
@@ -475,7 +476,8 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
-bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -561,7 +563,7 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
-extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
+extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
/*
* Global Function Prototypes in qla_attr.c source file.
@@ -613,7 +615,7 @@ extern void qlafx00_soft_reset(scsi_qla_host_t *);
extern int qlafx00_chip_diag(scsi_qla_host_t *);
extern void qlafx00_config_rings(struct scsi_qla_host *);
extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
-extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
extern void qlafx00_disable_intrs(struct qla_hw_data *);
@@ -765,4 +767,5 @@ extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
extern int qla8044_abort_isp(scsi_qla_host_t *);
extern int qla8044_check_fw_alive(struct scsi_qla_host *);
+extern void qlt_host_reset_handler(struct qla_hw_data *ha);
#endif /* _QLA_GBL_H */
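
qla_gbl.h splits the old disconnect check into 32-bit and 16-bit variants so both register widths can be tested after a read. The idea being relied on, sketched below with illustrative names: a PCI read against a surprise-removed device returns all ones, so a value of 0xffffffff (or 0xffff for a 16-bit register) is treated as "the adapter is gone" rather than as data.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: the driver's real helpers also schedule board teardown;
 * here we just show the width-specific comparison. */
static bool example_reg32_disconnected(uint32_t val)
{
	return val == 0xffffffffU;
}

static bool example_reg16_disconnected(uint16_t val)
{
	return val == 0xffffU;
}
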
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0df3b1b3823..dccc4dcc39c8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include "qla_target.h"
+#include <linux/utsname.h>
static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -143,10 +144,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
- "%s failed rejected request on port_id: "
- "%02x%02x%02x.\n", routine,
- vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa);
+ "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
+ routine, vha->d_id.b.domain,
+ vha->d_id.b.area, vha->d_id.b.al_pa,
+ comp_status, ct_rsp->header.response);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
0x2078, (uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr));
@@ -622,15 +623,16 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
}
void
-qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
{
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(ha))
- sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
+ snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
ha->mr.fw_version, qla2x00_version_str);
else
- sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+ snprintf(snn, size,
+ "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version, qla2x00_version_str);
}
@@ -670,7 +672,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
/* Prepare the Symbolic Node Name */
- qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
+ qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
+ sizeof(ct_req->req.rsnn_nn.sym_node_name));
/* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
@@ -1263,7 +1266,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- uint8_t *entries;
+ void *entries;
struct ct_fdmi_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
@@ -1288,7 +1291,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
entries = ct_req->req.rhba.hba_identifier;
/* Nodename. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
@@ -1298,11 +1301,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"NodeName = %8phN.\n", eiter->a.node_name);
/* Manufacturer. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
alen = strlen(QLA2XXX_MANUFACTURER);
- strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1310,12 +1314,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Manufacturer = %s.\n", eiter->a.manufacturer);
/* Serial number. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
- sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000);
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+ sizeof(eiter->a.serial_num));
+ else {
+ sn = ((ha->serial0 & 0x1f) << 16) |
+ (ha->serial2 << 8) | ha->serial1;
+ snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
+ }
alen = strlen(eiter->a.serial_num);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
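
The repeated "alen += 4 - (alen & 3);" lines in this file replace the older ternary form; both pad the string length up to the next multiple of four and always add at least one byte, which leaves room for the terminating NUL before the length is folded into the attribute header. A standalone check of the two expressions (sample strings are arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *samples[] = { "QLA", "0.00", "A12345", "QLE2562" };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int alen = strlen(samples[i]);
		unsigned int old_pad = alen +
			((alen & 3) ? (4 - (alen & 3)) : 4);
		unsigned int new_pad = alen + (4 - (alen & 3));

		printf("%-8s len=%u old=%u new=%u\n",
		       samples[i], alen, old_pad, new_pad);
	}
	return 0;
}
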
@@ -1323,11 +1334,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Serial no. = %s.\n", eiter->a.serial_num);
/* Model name. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
- strcpy(eiter->a.model, ha->model_number);
+ snprintf(eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
alen = strlen(eiter->a.model);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1335,11 +1347,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Model Name = %s.\n", eiter->a.model);
/* Model description. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- strncpy(eiter->a.model_desc, ha->model_desc, 80);
+ snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
alen = strlen(eiter->a.model_desc);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1347,11 +1360,23 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Model Desc = %s.\n", eiter->a.model_desc);
/* Hardware version. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- strcpy(eiter->a.hw_version, ha->adapter_id);
+ if (!IS_FWI2_CAPABLE(ha)) {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
alen = strlen(eiter->a.hw_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1359,11 +1384,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Hardware ver = %s.\n", eiter->a.hw_version);
/* Driver version. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- strcpy(eiter->a.driver_version, qla2x00_version_str);
+ snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
alen = strlen(eiter->a.driver_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1371,11 +1397,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Driver ver = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- strcpy(eiter->a.orom_version, "0.00");
+ snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
alen = strlen(eiter->a.orom_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1383,11 +1410,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
"Optrom vers = %s.\n", eiter->a.orom_version);
/* Firmware version */
- eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+ eiter = entries + size;
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
- ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
+ ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+ sizeof(eiter->a.fw_version));
alen = strlen(eiter->a.fw_version);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1419,6 +1447,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2034,
"HBA already registered.\n");
rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ad,
+ "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
}
} else {
ql_dbg(ql_dbg_disc, vha, 0x2035,
@@ -1429,6 +1462,534 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
}
/**
+ * qla2x00_fdmi_rpa() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmi_port_attr *eiter;
+ struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
+
+ /* Issue RPA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
+ RPA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+ size = WWN_SIZE + 4;
+
+ /* Attributes */
+ ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
+ entries = ct_req->req.rpa.port_name;
+
+ /* FC4 types. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+ eiter->len = cpu_to_be16(4 + 32);
+ eiter->a.fc4_types[2] = 0x01;
+ size += 4 + 32;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2039,
+ "FC4_TYPES=%02x %02x.\n",
+ eiter->a.fc4_types[2],
+ eiter->a.fc4_types[1]);
+
+ /* Supported speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ if (IS_CNA_CAPABLE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_10GB);
+ else if (IS_QLA27XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB);
+ else if (IS_QLA2031(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB);
+ else if (IS_QLA25XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA24XX_TYPE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA23XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_1GB);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203a,
+ "Supported_Speed=%x.\n", eiter->a.sup_speed);
+
+ /* Current speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_1GB);
+ break;
+ case PORT_SPEED_2GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_2GB);
+ break;
+ case PORT_SPEED_4GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_4GB);
+ break;
+ case PORT_SPEED_8GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_8GB);
+ break;
+ case PORT_SPEED_10GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_10GB);
+ break;
+ case PORT_SPEED_16GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ break;
+ case PORT_SPEED_32GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ break;
+ default:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+ break;
+ }
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203b,
+ "Current_Speed=%x.\n", eiter->a.cur_speed);
+
+ /* Max frame size. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+ eiter->len = cpu_to_be16(4 + 4);
+ eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203c,
+ "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+
+ /* OS device name. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+ snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen = strlen(eiter->a.os_dev_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204b,
+ "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+
+ /* Hostname. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", p_sysid->nodename);
+ } else {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", fc_host_system_hostname(vha->host));
+ }
+ alen = strlen(eiter->a.host_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x203e,
+ "RPA portname %016llx, size = %d.\n",
+ wwn_to_u64(ct_req->req.rpa.port_name), size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2040,
+ "RPA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20cd,
+ "RPA already registered.\n");
+ rval = QLA_ALREADY_REGISTERED;
+ }
+
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2041,
+ "RPA exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_fdmiv2_rhba() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size, sn;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmiv2_hba_attr *eiter;
+ struct qla_hw_data *ha = vha->hw;
+ struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
+
+ /* Issue RHBA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
+ RHBA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
+ ct_req->req.rhba2.entry_count = cpu_to_be32(1);
+ memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
+ size = 2 * WWN_SIZE + 4 + 4;
+
+ /* Attributes */
+ ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
+ entries = ct_req->req.rhba2.hba_identifier;
+
+ /* Nodename. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x207d,
+ "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+ /* Manufacturer. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
+ snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
+ alen = strlen(eiter->a.manufacturer);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a5,
+ "Manufacturer = %s.\n", eiter->a.manufacturer);
+
+ /* Serial number. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+ sizeof(eiter->a.serial_num));
+ else {
+ sn = ((ha->serial0 & 0x1f) << 16) |
+ (ha->serial2 << 8) | ha->serial1;
+ snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
+ }
+ alen = strlen(eiter->a.serial_num);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a6,
+ "Serial no. = %s.\n", eiter->a.serial_num);
+
+ /* Model name. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
+ snprintf(eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
+ alen = strlen(eiter->a.model);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a7,
+ "Model Name = %s.\n", eiter->a.model);
+
+ /* Model description. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+ snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
+ alen = strlen(eiter->a.model_desc);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a8,
+ "Model Desc = %s.\n", eiter->a.model_desc);
+
+ /* Hardware version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+ if (!IS_FWI2_CAPABLE(ha)) {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
+ alen = strlen(eiter->a.hw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a9,
+ "Hardware ver = %s.\n", eiter->a.hw_version);
+
+ /* Driver version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+ snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
+ alen = strlen(eiter->a.driver_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20aa,
+ "Driver ver = %s.\n", eiter->a.driver_version);
+
+ /* Option ROM version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+ snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen = strlen(eiter->a.orom_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ab,
+ "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
+ eiter->a.orom_version[0]);
+
+ /* Firmware version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+ ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+ sizeof(eiter->a.fw_version));
+ alen = strlen(eiter->a.fw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ac,
+ "Firmware vers = %s.\n", eiter->a.fw_version);
+
+ /* OS Name and Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s %s",
+ p_sysid->sysname, p_sysid->release, p_sysid->version);
+ } else {
+ snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s", "Linux", fc_host_system_hostname(vha->host));
+ }
+ alen = strlen(eiter->a.os_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ae,
+ "OS Name and Version = %s.\n", eiter->a.os_version);
+
+ /* MAX CT Payload Length */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+ eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20af,
+ "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+
+ /* Node Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+ qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+ sizeof(eiter->a.sym_name));
+ alen = strlen(eiter->a.sym_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b0,
+ "Symbolic Name = %s.\n", eiter->a.sym_name);
+
+ /* Vendor Id */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
+ eiter->a.vendor_id = cpu_to_be32(0x1077);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ "Vendor Id = %x.\n", eiter->a.vendor_id);
+
+ /* Num Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+ eiter->a.num_ports = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b2,
+ "Port Num = %x.\n", eiter->a.num_ports);
+
+ /* Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b3,
+ "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+ /* BIOS Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+ snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
+ "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen = strlen(eiter->a.bios_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b4,
+ "BIOS Name = %s\n", eiter->a.bios_name);
+
+ /* Vendor Identifier */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
+ snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer),
+ "%s", "QLGC");
+ alen = strlen(eiter->a.vendor_indentifer);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b5,
+ "RHBA identifier = %016llx.\n",
+ wwn_to_u64(ct_req->req.rhba2.hba_identifier));
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x20b7,
+ "RHBA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20b8,
+ "HBA already registered.\n");
+ rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2016,
+ "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ }
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20b9,
+ "RHBA FDMI V2 exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
* qla2x00_fdmi_dhba() -
* @ha: HA context
*
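
Each FDMI attribute built in the block above follows the same on-the-wire layout: a 16-bit type, a 16-bit length covering the 4-byte header plus the padded value, then the value itself, with "entries + size" (pointer arithmetic on a void *, a GCC extension the kernel relies on) stepping to the next slot. A kernel-style sketch of one string attribute being appended; the struct and helper are illustrative, not the driver's:

struct example_attr {
	__be16 type;
	__be16 len;		/* 4-byte header + padded value length */
	char value[];
};

/* Returns the offset of the next free slot in the attribute buffer. */
static size_t example_add_string_attr(void *entries, size_t off,
				      u16 type, const char *s)
{
	struct example_attr *attr = entries + off;
	size_t alen = strlen(s);

	alen += 4 - (alen & 3);		/* pad to a 4-byte boundary */
	attr->type = cpu_to_be16(type);
	attr->len = cpu_to_be16(4 + alen);
	snprintf(attr->value, alen, "%s", s);
	return off + 4 + alen;
}
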
@@ -1477,23 +2038,24 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
}
/**
- * qla2x00_fdmi_rpa() -
+ * qla2x00_fdmiv2_rpa() -
* @ha: HA context
*
* Returns 0 on success.
*/
static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
{
int rval, alen;
- uint32_t size, max_frame_size;
+ uint32_t size;
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- uint8_t *entries;
- struct ct_fdmi_port_attr *eiter;
+ void *entries;
+ struct ct_fdmiv2_port_attr *eiter;
struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
/* Issue RPA */
/* Prepare common MS IOCB */
@@ -1505,147 +2067,258 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+ memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
size = WWN_SIZE + 4;
/* Attributes */
- ct_req->req.rpa.attrs.count =
- __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT - 1);
- entries = ct_req->req.rpa.port_name;
+ ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
+ entries = ct_req->req.rpa2.port_name;
/* FC4 types. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = __constant_cpu_to_be16(4 + 32);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+ eiter->len = cpu_to_be16(4 + 32);
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
- ql_dbg(ql_dbg_disc, vha, 0x2039,
+ ql_dbg(ql_dbg_disc, vha, 0x20ba,
"FC4_TYPES=%02x %02x.\n",
eiter->a.fc4_types[2],
eiter->a.fc4_types[1]);
/* Supported speed. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = __constant_cpu_to_be16(4 + 4);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
+ eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_10GB);
else if (IS_QLA27XX(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
- FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
FDMI_PORT_SPEED_8GB);
+ else if (IS_QLA2031(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB);
else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
- FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(
- FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_4GB);
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
else if (IS_QLA23XX(ha))
- eiter->a.sup_speed =__constant_cpu_to_be32(
- FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
else
- eiter->a.sup_speed = __constant_cpu_to_be32(
+ eiter->a.sup_speed = cpu_to_be32(
FDMI_PORT_SPEED_1GB);
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x203a,
- "Supported_Speed=%x.\n", eiter->a.sup_speed);
+ ql_dbg(ql_dbg_disc, vha, 0x20bb,
+ "Supported Port Speed = %x.\n", eiter->a.sup_speed);
/* Current speed. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = __constant_cpu_to_be16(4 + 4);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
switch (ha->link_data_rate) {
case PORT_SPEED_1GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
break;
case PORT_SPEED_2GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
break;
case PORT_SPEED_4GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
break;
case PORT_SPEED_8GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
break;
case PORT_SPEED_10GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
break;
case PORT_SPEED_16GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
break;
case PORT_SPEED_32GB:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
break;
default:
- eiter->a.cur_speed =
- __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
break;
}
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x203b,
- "Current_Speed=%x.\n", eiter->a.cur_speed);
+ ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ "Current_Speed = %x.\n", eiter->a.cur_speed);
/* Max frame size. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = __constant_cpu_to_be16(4 + 4);
- max_frame_size = IS_FWI2_CAPABLE(ha) ?
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+ eiter->len = cpu_to_be16(4 + 4);
+ eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
le16_to_cpu(icb24->frame_payload_size):
le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
+ eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x203c,
- "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+ ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
/* OS device name. */
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
alen = strlen(QLA2XXX_DRIVER_NAME);
- strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+ snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- ql_dbg(ql_dbg_disc, vha, 0x204b,
- "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20be,
+ "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
/* Hostname. */
- if (strlen(fc_host_system_hostname(vha->host))) {
- ct_req->req.rpa.attrs.count =
- __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
- eiter = (struct ct_fdmi_port_attr *) (entries + size);
- eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", p_sysid->nodename);
+ } else {
snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
"%s", fc_host_system_hostname(vha->host));
- alen = strlen(eiter->a.host_name);
- alen += (alen & 3) ? (4 - (alen & 3)) : 4;
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x203d,
- "HostName=%s.\n", eiter->a.host_name);
}
+ alen = strlen(eiter->a.host_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203d,
+ "HostName=%s.\n", eiter->a.host_name);
+
+ /* Node Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c0,
+ "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+ /* Port Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+ memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c1,
+ "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
+
+ /* Port Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+ qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+ sizeof(eiter->a.port_sym_name));
+ alen = strlen(eiter->a.port_sym_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c2,
+ "port symbolic name = %s\n", eiter->a.port_sym_name);
+
+ /* Port Type */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+ eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c3,
+ "Port Type = %x.\n", eiter->a.port_type);
+
+ /* Class of Service */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+ eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c4,
+ "Supported COS = %08x\n", eiter->a.port_supported_cos);
+
+ /* Port Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c5,
+ "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+ /* FC4_type */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
+ eiter->a.port_fc4_type[0] = 0;
+ eiter->a.port_fc4_type[1] = 0;
+ eiter->a.port_fc4_type[2] = 1;
+ eiter->a.port_fc4_type[3] = 0;
+ eiter->len = cpu_to_be16(4 + 32);
+ size += 4 + 32;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c6,
+ "Port Active FC4 Type = %02x %02x.\n",
+ eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+
+ /* Port State */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_STATE);
+ eiter->a.port_state = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c7,
+ "Port State = %x.\n", eiter->a.port_state);
+
+ /* Number of Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+ eiter->a.num_ports = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "Number of ports = %x.\n", eiter->a.num_ports);
+
+ /* Port Id */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_ID);
+ eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "Port Id = %x.\n", eiter->a.port_id);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
ql_dbg(ql_dbg_disc, vha, 0x203e,
"RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
entries, size);
/* Execute MS IOCB */
@@ -1653,14 +2326,26 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2040,
- "RPA issue IOCB failed (%d).\n", rval);
+ ql_dbg(ql_dbg_disc, vha, 0x20cb,
+ "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ce,
+ "RPA FDMI v2 already registered\n");
+ rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2020,
+ "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ }
} else {
- ql_dbg(ql_dbg_disc, vha, 0x2041,
- "RPA exiting nornally.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x20cc,
+ "RPA FDMI V2 exiting normally.\n");
}
return rval;
@@ -1675,8 +2360,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
int
qla2x00_fdmi_register(scsi_qla_host_t *vha)
{
- int rval;
- struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
IS_QLAFX00(ha))
@@ -1686,6 +2371,26 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
if (rval)
return rval;
+ rval = qla2x00_fdmiv2_rhba(vha);
+ if (rval) {
+ if (rval != QLA_ALREADY_REGISTERED)
+ goto try_fdmi;
+
+ rval = qla2x00_fdmi_dhba(vha);
+ if (rval)
+ goto try_fdmi;
+
+ rval = qla2x00_fdmiv2_rhba(vha);
+ if (rval)
+ goto try_fdmi;
+ }
+ rval = qla2x00_fdmiv2_rpa(vha);
+ if (rval)
+ goto try_fdmi;
+
+ goto out;
+
+try_fdmi:
rval = qla2x00_fdmi_rhba(vha);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
@@ -1700,7 +2405,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
return rval;
}
rval = qla2x00_fdmi_rpa(vha);
-
+out:
return rval;
}
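
With the hunks above, qla2x00_fdmi_register() tries the FDMI v2 (SM-HBA) registration first and only drops back to the original FDMI v1 path when v2 fails outright; an "already registered" answer triggers a deregister-and-retry before v2 is abandoned, and a v2 RPA failure also falls back. A control-flow sketch with stub helpers standing in for the driver's calls (the canned return values exist only to keep the fragment self-contained):

enum { OK = 0, FAILED = 1, ALREADY_REGISTERED = 2 };

static int fdmiv2_rhba(void)  { return ALREADY_REGISTERED; }	/* pretend */
static int fdmi_dhba(void)    { return OK; }
static int fdmiv2_rpa(void)   { return OK; }
static int fdmi_v1_path(void) { return OK; }	/* rhba + rpa, v1 style */

static int example_fdmi_register(void)
{
	int rval = fdmiv2_rhba();		/* try FDMI v2 first */

	if (rval) {
		if (rval != ALREADY_REGISTERED)
			return fdmi_v1_path();	/* fall back to v1 */
		if (fdmi_dhba() != OK)		/* deregister, retry v2 */
			return fdmi_v1_path();
		if (fdmiv2_rhba() != OK)
			return fdmi_v1_path();
	}
	if (fdmiv2_rpa() != OK)			/* v2 port attributes */
		return fdmi_v1_path();
	return OK;
}
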
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 46990f4ceb40..a4dde7e80dbd 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1848,7 +1848,9 @@ enable_82xx_npiv:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
- if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
+ if (IS_QLA27XX(ha))
+ ha->flags.fac_supported = 1;
+ else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
uint32_t size;
rval = qla81xx_fac_get_sector_size(vha, &size);
@@ -2196,6 +2198,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->fw_xcb_count);
+ /* D-Port Status */
+ if (IS_DPORT_CAPABLE(ha))
+ mid_init_cb->init_cb.firmware_options_1 |=
+ cpu_to_le16(BIT_7);
+ /* Enable FA-WWPN */
+ ha->flags.fawwpn_enabled =
+ (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0;
+ ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
+ (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
@@ -2224,7 +2235,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
unsigned long wtime, mtime, cs84xx_time;
uint16_t min_wait; /* Minimum wait time if loop is down */
uint16_t wait_time; /* Wait time if loop is coming ready */
- uint16_t state[5];
+ uint16_t state[6];
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(vha->hw))
@@ -2329,8 +2340,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
} while (1);
ql_dbg(ql_dbg_taskm, vha, 0x803a,
- "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
- state[1], state[2], state[3], state[4], jiffies);
+ "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
+ state[1], state[2], state[3], state[4], state[5], jiffies);
if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
ql_log(ql_log_warn, vha, 0x803b,
@@ -2596,18 +2607,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
nv->special_options[1] = BIT_7;
} else if (IS_QLA2200(ha)) {
nv->firmware_options[0] = BIT_2 | BIT_1;
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = __constant_cpu_to_le16(1024);
+ nv->frame_payload_size = 1024;
} else if (IS_QLA2100(ha)) {
nv->firmware_options[0] = BIT_3 | BIT_1;
nv->firmware_options[1] = BIT_5;
- nv->frame_payload_size = __constant_cpu_to_le16(1024);
+ nv->frame_payload_size = 1024;
}
nv->max_iocb_allocation = __constant_cpu_to_le16(256);
@@ -2643,7 +2654,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
* are valid.
*/
if (ia64_platform_is("sn2")) {
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
if (IS_QLA23XX(ha))
nv->special_options[1] = BIT_7;
}
@@ -3192,7 +3203,7 @@ static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
int rval;
- uint16_t mb[4];
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
if (!IS_IIDMA_CAPABLE(ha))
@@ -4564,6 +4575,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* Requeue all commands in outstanding command list. */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
}
+
+ ha->chip_reset++;
+ /* memory barrier */
+ wmb();
}
/*
@@ -4958,7 +4973,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
nv->version = __constant_cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->hard_address = __constant_cpu_to_le16(124);
@@ -5225,7 +5240,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_log(ql_log_fatal, vha, 0x008f,
"Failed to load segment %d of firmware.\n",
fragment);
- break;
+ return QLA_FUNCTION_FAILED;
}
faddr += dlen;
@@ -5528,7 +5543,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0098,
"Failed to load segment %d of firmware.\n",
fragment);
- break;
+ return QLA_FUNCTION_FAILED;
}
fwcode += dlen;
@@ -5905,7 +5920,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
nv->version = __constant_cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = __constant_cpu_to_le16(2048);
+ nv->frame_payload_size = 2048;
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->port_name[0] = 0x21;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index b3b1d6fc2d6c..fee9eb7c8a60 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -279,3 +279,11 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
complete(&ha->mbx_intr_comp);
}
}
+
+static inline void
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+{
+ if (retry_delay)
+ fcport->retry_delay_timestamp = jiffies +
+ (retry_delay * HZ / 10);
+}
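
Note on the new qla2x00_set_retry_delay_timestamp() helper above: the retry delay carried in FCP_RSP is treated as a count of 100 ms units, so the deadline is "now + units * HZ / 10" in jiffies. The fragment below is a minimal standalone sketch of that same arithmetic, not part of the patch; the function name is hypothetical.

#include <linux/jiffies.h>
#include <linux/types.h>

/* One retry-delay unit is 100 ms, i.e. HZ / 10 jiffies. */
static unsigned long example_retry_deadline(u16 retry_delay_units)
{
	return jiffies + (unsigned long)retry_delay_units * HZ / 10;
}

Storing an absolute jiffies deadline (rather than the raw unit count) lets the fast path later test it with a single time_after() comparison.
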
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 150529d98db4..f0edb07f3198 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1858,6 +1858,17 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
}
/* Generic Control-SRB manipulation functions. */
+
+/* hardware_lock assumed to be held. */
+void *
+qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
+{
+ if (qla2x00_reset_active(vha))
+ return NULL;
+
+ return qla2x00_alloc_iocbs(vha, sp);
+}
+
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
@@ -1901,7 +1912,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
- if (req->cnt < req_cnt) {
+ if (req->cnt < req_cnt + 2) {
if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
@@ -1920,7 +1931,7 @@ skip_cmd_array:
req->cnt = req->length -
(req->ring_index - cnt);
}
- if (req->cnt < req_cnt)
+ if (req->cnt < req_cnt + 2)
goto queuing_error;
/* Prep packet */
@@ -2648,7 +2659,7 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
-void
+static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
struct srb_iocb *aio = &sp->u.iocb_cmd;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 550a4a31f51a..a04a1b1f7f32 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,16 +56,8 @@ qla2100_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
- /* Check for PCI disconnection */
- if (hccr == 0xffff) {
- /*
- * Schedule this on the default system workqueue so that
- * all the adapter workqueues and the DPC thread can be
- * shutdown cleanly.
- */
- schedule_work(&ha->board_disable);
+ if (qla2x00_check_reg16_for_disconnect(vha, hccr))
break;
- }
if (hccr & HCCR_RISC_PAUSE) {
if (pci_channel_offline(ha->pdev))
break;
@@ -121,21 +113,31 @@ qla2100_intr_handler(int irq, void *dev_id)
}
bool
-qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
/* Check for PCI disconnection */
if (reg == 0xffffffff) {
- /*
- * Schedule this on the default system workqueue so that all the
- * adapter workqueues and the DPC thread can be shutdown
- * cleanly.
- */
- schedule_work(&vha->hw->board_disable);
+ if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
+ !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
+ !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
+ /*
+ * Schedule this (only once) on the default system
+ * workqueue so that all the adapter workqueues and the
+ * DPC thread can be shutdown cleanly.
+ */
+ schedule_work(&vha->hw->board_disable);
+ }
return true;
} else
return false;
}
+bool
+qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
+{
+ return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
+}
+
/**
* qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
* @irq:
@@ -174,7 +176,7 @@ qla2300_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
@@ -573,8 +575,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- uint32_t rscn_entry, host_pid;
+ uint32_t rscn_entry, host_pid, tmp_pid;
unsigned long flags;
+ fc_port_t *fcport = NULL;
/* Setup to process RIO completion. */
handle_cnt = 0;
@@ -730,7 +733,7 @@ skip_rio:
else
ha->link_data_rate = mb[1];
- ql_dbg(ql_dbg_async, vha, 0x500a,
+ ql_log(ql_log_info, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n",
qla2x00_get_link_speed_str(ha, ha->link_data_rate));
@@ -743,13 +746,23 @@ skip_rio:
? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
: mbx;
- ql_dbg(ql_dbg_async, vha, 0x500b,
+ ql_log(ql_log_info, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
mb[1], mb[2], mb[3], mbx);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ /*
+ * In case of loop down, restore WWPN from
+ * NVRAM in case of FA-WWPN capable ISP
+ */
+ if (ha->flags.fawwpn_enabled) {
+ void *wwpn = ha->init_cb->port_name;
+
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ }
+
vha->device_flags |= DFLG_NO_CABLE;
qla2x00_mark_all_devices_lost(vha, 1);
}
@@ -908,7 +921,8 @@ skip_rio:
* it. Otherwise ignore it and Wait for RSCN to come in.
*/
atomic_set(&vha->loop_down_timer, 0);
- if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+ atomic_read(&vha->loop_state) != LOOP_DEAD) {
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
@@ -920,9 +934,6 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5012,
"Port database changed %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
- ql_log(ql_log_warn, vha, 0x505f,
- "Link is operational (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha, ha->link_data_rate));
/*
* Mark all devices as missing so we will login again.
@@ -969,6 +980,20 @@ skip_rio:
if (qla2x00_is_a_vp_did(vha, rscn_entry))
break;
+ /*
+ * Search for the rport related to this RSCN entry and mark it
+ * as lost.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&fcport->state) != FCS_ONLINE)
+ continue;
+ tmp_pid = fcport->d_id.b24;
+ if (fcport->d_id.b24 == rscn_entry) {
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ break;
+ }
+ }
+
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -1086,6 +1111,14 @@ skip_rio:
qla83xx_handle_8200_aen(vha, mb);
break;
+ case MBA_DPORT_DIAGNOSTICS:
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1],
+ mb[1] == 0 ? "start" :
+ mb[1] == 1 ? "done (ok)" :
+ mb[1] == 2 ? "done (error)" : "other");
+ break;
+
default:
ql_dbg(ql_dbg_async, vha, 0x5057,
"Unknown AEN:%04x %04x %04x %04x\n",
@@ -1975,6 +2008,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int logit = 1;
int res = 0;
uint16_t state_flags = 0;
+ uint16_t retry_delay = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2068,6 +2102,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
+ /* Valid values of the retry delay timer are 0x1-0xffef */
+ if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
+ retry_delay = sts24->retry_delay;
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le16_to_cpu(sts->req_sense_length);
@@ -2102,6 +2139,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
comp_status = CS_DATA_OVERRUN;
/*
+ * Check retry_delay_timer value if we receive a busy or
+ * queue full.
+ */
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
+ lscsi_status == SAM_STAT_BUSY)
+ qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+
+ /*
* Based on Host and scsi status generate status code for Linux
*/
switch (comp_status) {
@@ -2633,7 +2678,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2723,7 +2768,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
* we process the response queue.
*/
stat = RD_REG_DWORD(&reg->host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
goto out;
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
@@ -2763,7 +2808,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
- if (qla2x00_check_reg_for_disconnect(vha, hccr))
+ if (qla2x00_check_reg32_for_disconnect(vha, hccr))
goto out;
queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
@@ -2798,7 +2843,7 @@ qla24xx_msix_default(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
do {
stat = RD_REG_DWORD(&reg->host_status);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2923,27 +2968,22 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++)
entries[i].entry = i;
- ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
- if (ret) {
- if (ret < MIN_MSIX_COUNT)
- goto msix_failed;
-
+ ret = pci_enable_msix_range(ha->pdev,
+ entries, MIN_MSIX_COUNT, ha->msix_count);
+ if (ret < 0) {
+ ql_log(ql_log_fatal, vha, 0x00c7,
+ "MSI-X: Failed to enable support, "
+ "giving up -- %d/%d.\n",
+ ha->msix_count, ret);
+ goto msix_out;
+ } else if (ret < ha->msix_count) {
ql_log(ql_log_warn, vha, 0x00c6,
"MSI-X: Failed to enable support "
"-- %d/%d\n Retry with %d vectors.\n",
ha->msix_count, ret, ret);
- ha->msix_count = ret;
- ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
- if (ret) {
-msix_failed:
- ql_log(ql_log_fatal, vha, 0x00c7,
- "MSI-X: Failed to enable support, "
- "giving up -- %d/%d.\n",
- ha->msix_count, ret);
- goto msix_out;
- }
- ha->max_rsp_queues = ha->msix_count - 1;
}
+ ha->msix_count = ret;
+ ha->max_rsp_queues = ha->msix_count - 1;
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
if (!ha->msix_entries) {
@@ -3103,10 +3143,11 @@ skip_msi:
}
clear_risc_ints:
+ if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+ goto fail;
spin_lock_irq(&ha->hardware_lock);
- if (!IS_FWI2_CAPABLE(ha))
- WRT_REG_WORD(&reg->isp.semaphore, 0);
+ WRT_REG_WORD(&reg->isp.semaphore, 0);
spin_unlock_irq(&ha->hardware_lock);
fail:
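
The MSI-X hunk above drops the old pci_enable_msix() retry loop in favour of a single pci_enable_msix_range() call, which either grants somewhere between the requested minimum and maximum vector count (returning how many) or fails outright with a negative errno. A minimal sketch of that calling convention follows; "my_entries", "MY_MIN" and "MY_MAX" are hypothetical placeholders, not driver symbols.

#include <linux/pci.h>

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *my_entries,
			       int MY_MIN, int MY_MAX)
{
	int nvec = pci_enable_msix_range(pdev, my_entries, MY_MIN, MY_MAX);

	if (nvec < 0)
		return nvec;	/* could not meet even the minimum */

	/* nvec may be anywhere in [MY_MIN, MY_MAX]; size per-vector
	 * bookkeeping (handlers, queues) to this value. */
	return nvec;
}

This removes the "retry with fewer vectors" dance entirely, which is why the hunk can set ha->msix_count and ha->max_rsp_queues once from the return value.
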
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d9aafc003be2..72971daa2552 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -117,7 +117,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command = mcp->mb[0];
mboxes = mcp->out_mb;
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
+ ql_dbg(ql_dbg_mbx, vha, 0x1111,
"Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
@@ -373,7 +373,7 @@ premature_exit:
mbx_done:
if (rval) {
- ql_log(ql_log_warn, base_vha, 0x1020,
+ ql_dbg(ql_dbg_disc, base_vha, 0x1020,
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
} else {
@@ -1085,6 +1085,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
if (IS_CNA_CAPABLE(vha->hw))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
+ if (IS_FWI2_CAPABLE(vha->hw))
+ mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1118,6 +1120,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
}
+ /* If FA-WWN supported */
+ if (mcp->mb[7] & BIT_14) {
+ vha->port_name[0] = MSB(mcp->mb[16]);
+ vha->port_name[1] = LSB(mcp->mb[16]);
+ vha->port_name[2] = MSB(mcp->mb[17]);
+ vha->port_name[3] = LSB(mcp->mb[17]);
+ vha->port_name[4] = MSB(mcp->mb[18]);
+ vha->port_name[5] = LSB(mcp->mb[18]);
+ vha->port_name[6] = MSB(mcp->mb[19]);
+ vha->port_name[7] = LSB(mcp->mb[19]);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+ "FA-WWN acquired %016llx\n",
+ wwn_to_u64(vha->port_name));
+ }
}
return rval;
@@ -1546,7 +1564,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw))
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
else
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -1560,6 +1578,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
states[2] = mcp->mb[3];
states[3] = mcp->mb[4];
states[4] = mcp->mb[5];
+ states[5] = mcp->mb[6]; /* DPORT status */
}
if (rval != QLA_SUCCESS) {
@@ -3328,8 +3347,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+ /* FA-WWN is only for physical port */
+ if (!vp_idx) {
+ void *wwpn = ha->init_cb->port_name;
+
+ if (!MSB(stat)) {
+ if (rptid_entry->vp_idx_map[1] & BIT_6)
+ wwpn = rptid_entry->reserved_4 + 8;
+ }
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_mbx, vha, 0x1018,
+ "FA-WWN portname %016llx (%x)\n",
+ fc_host_port_name(vha->host), MSB(stat));
+ }
+
vp = vha;
- if (vp_idx == 0 && (MSB(stat) != 1))
+ if (vp_idx == 0)
goto reg_needed;
if (MSB(stat) != 0 && MSB(stat) != 2) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 89998244f48d..5c2e0317f1c0 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -702,6 +702,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
+ req->out_ptr = (void *)(req->ring + req->length);
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
@@ -811,6 +812,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
+ rsp->in_ptr = (void *)(rsp->ring + rsp->length);
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 4775baa8b6a0..80867599527d 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -695,11 +695,11 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
}
char *
-qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
struct qla_hw_data *ha = vha->hw;
- sprintf(str, "%s", ha->mr.fw_version);
+ snprintf(str, size, "%s", ha->mr.fw_version);
return str;
}
@@ -1551,7 +1551,10 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
ha->mr.fw_reset_timer_tick =
QLAFX00_MAX_RESET_INTERVAL;
}
- ha->mr.old_aenmbx0_state = aenmbx0;
+ if (ha->mr.old_aenmbx0_state != aenmbx0) {
+ ha->mr.old_aenmbx0_state = aenmbx0;
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ }
ha->mr.fw_reset_timer_tick--;
}
if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
@@ -1675,17 +1678,16 @@ qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
fc_port_t *fcport;
/* Check for matching device in remote port list. */
- fcport = NULL;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->tgt_id == tgt_id) {
ql_dbg(ql_dbg_async, vha, 0x5072,
"Matching fcport(%p) found with TGT-ID: 0x%x "
"and Remote TGT_ID: 0x%x\n",
fcport, fcport->tgt_id, tgt_id);
- break;
+ return fcport;
}
}
- return fcport;
+ return NULL;
}
static void
@@ -2924,7 +2926,7 @@ qlafx00_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; clr_intr = 0) {
stat = QLAFX00_RD_INTR_REG(ha);
- if (qla2x00_check_reg_for_disconnect(vha, stat))
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
if (!intr_stat)
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 58f3c912d96e..54cb2ac9339b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -857,7 +857,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
break;
if (timeout >= qla82xx_rom_lock_timeout) {
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
- ql_log(ql_log_warn, vha, 0xb157,
+ ql_dbg(ql_dbg_p3p, vha, 0xb157,
"%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
__func__, ha->portnum, lock_owner);
return -1;
@@ -2123,7 +2123,7 @@ qla82xx_msix_default(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
do {
host_int = RD_REG_DWORD(&reg->host_int);
- if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
break;
if (host_int) {
stat = RD_REG_DWORD(&reg->host_status);
@@ -2184,7 +2184,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
host_int = RD_REG_DWORD(&reg->host_int);
- if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->host_int, 0);
@@ -2219,7 +2219,7 @@ qla82xx_poll(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
host_int = RD_REG_DWORD(&reg->host_int);
- if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
if (host_int) {
stat = RD_REG_DWORD(&reg->host_status);
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index da9e3902f219..24a852828b5d 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -399,7 +399,7 @@ qla8044_idc_lock(struct qla_hw_data *ha)
*/
ql_dbg(ql_dbg_p3p, vha, 0xb08a,
"%s: IDC lock Recovery by %d "
- "failed, Retrying timout\n", __func__,
+ "failed, Retrying timeout\n", __func__,
ha->portnum);
timeout = 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index be9698d920c2..dabd25429c58 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd,
"based on total number of SG elements.");
int ql2xfdmienable=1;
-module_param(ql2xfdmienable, int, S_IRUGO);
+module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
@@ -238,7 +238,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);
+static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
+static void qla83xx_disable_laser(scsi_qla_host_t *vha);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -547,14 +549,13 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
}
static char *
-qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
+qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
char un_str[10];
struct qla_hw_data *ha = vha->hw;
- sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
- ha->fw_minor_version,
- ha->fw_subminor_version);
+ snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
+ ha->fw_minor_version, ha->fw_subminor_version);
if (ha->fw_attributes & BIT_9) {
strcat(str, "FLX");
@@ -586,11 +587,11 @@ qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
}
static char *
-qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
+qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
struct qla_hw_data *ha = vha->hw;
- sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
+ snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
return str;
}
@@ -730,6 +731,15 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
+ /*
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+ if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
@@ -860,8 +870,10 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- while ((!(vha->flags.online) || ha->dpc_active ||
- ha->flags.mbox_busy))
+ while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
+ ha->flags.mbox_busy) ||
+ test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+ test_bit(FX00_TARGET_SCAN, &vha->dpc_flags))
msleep(1000);
}
@@ -1351,6 +1363,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ qlt_host_reset_handler(ha);
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
@@ -2384,6 +2398,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
ha->tgt.enable_class_2 = ql2xenableclass2;
+ INIT_LIST_HEAD(&ha->tgt.q_full_list);
+ spin_lock_init(&ha->tgt.q_full_lock);
/* Clear our data area */
ha->bars = bars;
@@ -2527,7 +2543,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- req_length = REQUEST_ENTRY_CNT_24XX;
+ req_length = REQUEST_ENTRY_CNT_83XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
@@ -2631,6 +2647,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, base_vha);
+ set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
host = base_vha->host;
base_vha->req = req;
@@ -2923,10 +2940,11 @@ skip_dpc:
pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
base_vha->host_no,
- ha->isp_ops->fw_version_str(base_vha, fw_str));
+ ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
qlt_add_target(ha, base_vha);
+ clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
return 0;
probe_init_failed:
@@ -2954,16 +2972,8 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
- if (IS_QLA82XX(ha)) {
- qla82xx_idc_lock(ha);
- qla82xx_clear_drv_active(ha);
- qla82xx_idc_unlock(ha);
- }
- if (IS_QLA8044(ha)) {
- qla8044_idc_lock(ha);
- qla8044_clear_drv_active(ha);
- qla8044_idc_unlock(ha);
- }
+ qla2x00_clear_drv_active(ha);
+
iospace_config_failed:
if (IS_P3P_TYPE(ha)) {
if (!ha->nx_pcibase)
@@ -3026,6 +3036,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
qla2x00_free_irqs(vha);
qla2x00_free_fw_dump(ha);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
}
/* Deletes all the virtual ports for a given ha */
@@ -3119,10 +3132,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
}
static void
-qla2x00_clear_drv_active(scsi_qla_host_t *vha)
+qla2x00_clear_drv_active(struct qla_hw_data *ha)
{
- struct qla_hw_data *ha = vha->hw;
-
if (IS_QLA8044(ha)) {
qla8044_idc_lock(ha);
qla8044_clear_drv_active(ha);
@@ -3140,15 +3151,25 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_qla_host_t *base_vha;
struct qla_hw_data *ha;
+ base_vha = pci_get_drvdata(pdev);
+ ha = base_vha->hw;
+
+ /* Indicate device removal to prevent future board_disable and wait
+ * until any pending board_disable has completed. */
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ cancel_work_sync(&ha->board_disable);
+
/*
- * If the PCI device is disabled that means that probe failed and any
- * resources should be have cleaned up on probe exit.
+ * If the PCI device is disabled then there was a PCI-disconnect and
+ * qla2x00_disable_board_on_pci_error has taken care of most of the
+ * resources.
*/
- if (!atomic_read(&pdev->enable_cnt))
+ if (!atomic_read(&pdev->enable_cnt)) {
+ scsi_host_put(base_vha->host);
+ kfree(ha);
+ pci_set_drvdata(pdev, NULL);
return;
-
- base_vha = pci_get_drvdata(pdev);
- ha = base_vha->hw;
+ }
qla2x00_wait_for_hba_ready(base_vha);
@@ -3173,6 +3194,10 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla84xx_put_chip(base_vha);
+ /* Laser should be disabled only for ISP2031 */
+ if (IS_QLA2031(ha))
+ qla83xx_disable_laser(base_vha);
+
/* Disable timer */
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
@@ -3191,9 +3216,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_device(base_vha);
- scsi_host_put(base_vha->host);
+ qla2x00_clear_drv_active(ha);
- qla2x00_clear_drv_active(base_vha);
+ scsi_host_put(base_vha->host);
qla2x00_unmap_iobases(ha);
@@ -4808,18 +4833,15 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
qla82xx_md_free(base_vha);
qla2x00_free_queues(ha);
- scsi_host_put(base_vha->host);
-
qla2x00_unmap_iobases(ha);
pci_release_selected_regions(ha->pdev, ha->bars);
- kfree(ha);
- ha = NULL;
-
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ /*
+ * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
+ */
}
/**************************************************************************
@@ -5192,13 +5214,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
*/
if (!pci_channel_offline(ha->pdev)) {
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- if (w == 0xffff)
- /*
- * Schedule this on the default system workqueue so that
- * all the adapter workqueues and the DPC thread can be
- * shutdown cleanly.
- */
- schedule_work(&ha->board_disable);
+ qla2x00_check_reg16_for_disconnect(vha, w);
}
/* Make sure qla82xx_watchdog is run only for physical port */
@@ -5706,6 +5722,32 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
+static void
+qla83xx_disable_laser(scsi_qla_host_t *vha)
+{
+ uint32_t reg, data, fn;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
+
+ /* pci func #/port # */
+ ql_dbg(ql_dbg_init, vha, 0x004b,
+ "Disabling Laser for hba: %p\n", vha);
+
+ fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
+ (BIT_15|BIT_14|BIT_13|BIT_12));
+
+ fn = (fn >> 12);
+
+ if (fn & 1)
+ reg = PORT_1_2031;
+ else
+ reg = PORT_0_2031;
+
+ data = LASER_OFF_2031;
+
+ qla83xx_wr_reg(vha, reg, data);
+}
+
static const struct pci_error_handlers qla2xxx_err_handler = {
.error_detected = qla2xxx_pci_error_detected,
.mmio_enabled = qla2xxx_pci_mmio_enabled,
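
The qla2xxx_queuecommand() hunk earlier in this file is the consumer side of the retry_delay_timestamp set in qla_inline.h above: new I/O to the port is bounced as target-busy until time_after() says the stored deadline has passed, at which point the timestamp is cleared. Below is a hedged, self-contained sketch of that gate; the structure and field names are hypothetical, not the driver's.

#include <linux/jiffies.h>
#include <linux/types.h>

struct example_port {
	unsigned long retry_deadline;	/* 0 means no hold-off in effect */
};

static bool example_port_may_send(struct example_port *p)
{
	if (time_after(jiffies, p->retry_deadline)) {
		p->retry_deadline = 0;	/* hold-off expired, resume I/O */
		return true;
	}
	return false;			/* still inside the retry window */
}
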
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bca173e56f16..b656a05613e8 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2580,7 +2580,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t faddr, left, burst;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha))
goto try_fast;
if (offset & 0xfff)
goto slow_read;
@@ -3091,7 +3092,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
ha->fw_revision[2] = dcode[2];
ha->fw_revision[3] = dcode[3];
ql_dbg(ql_dbg_init, vha, 0x0060,
- "Firmware revision %d.%d.%d.%d.\n",
+ "Firmware revision %d.%d.%d (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
ha->fw_revision[2], ha->fw_revision[3]);
}
@@ -3162,7 +3163,7 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
}
if (pos < end - len && *pos != 0x78)
- return snprintf(str, size, "%.*s", len, pos + 3);
+ return scnprintf(str, size, "%.*s", len, pos + 3);
return 0;
}
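
The last qla_sup.c hunk switches qla2xxx_get_vpd_field()'s return value from snprintf() to scnprintf(). The practical difference: snprintf() reports how much it would have written (which can exceed the buffer size on truncation), while scnprintf() reports how many characters actually landed in the buffer, so its return value is safe to use as a length or offset. A small illustrative sketch follows, assuming an arbitrary buffer and example strings.

#include <linux/kernel.h>

static size_t example_fill(char *buf, size_t size)
{
	size_t len = 0;

	/* scnprintf() returns what was actually stored in buf, so it can
	 * be accumulated as an offset even when output is truncated. */
	len += scnprintf(buf + len, size - len, "vendor: %s\n", "QLogic");
	len += scnprintf(buf + len, size - len, "field: %.*s\n", 4, "data");

	return len;
}
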
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e632e14180cf..829752cfd73f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -42,6 +42,11 @@
#include "qla_def.h"
#include "qla_target.h"
+static int ql2xtgt_tape_enable;
+module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xtgt_tape_enable,
+ "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
+
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
@@ -54,6 +59,8 @@ MODULE_PARM_DESC(qlini_mode,
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+static int temp_sam_status = SAM_STAT_BUSY;
+
/*
* From scsi/fc/fc_fcp.h
*/
@@ -101,6 +108,10 @@ static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
struct qla_tgt_srr_imm *imm, int ha_lock);
+static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd);
+static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status, int qfull);
/*
* Global Variables
*/
@@ -178,6 +189,27 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
return NULL;
}
+static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+
+ vha->hw->tgt.num_pend_cmds++;
+ if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
+ vha->hw->qla_stats.stat_max_pend_cmds =
+ vha->hw->tgt.num_pend_cmds;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+ vha->hw->tgt.num_pend_cmds--;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
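
Illustrative sketch (not from the patch) of the counter pattern used by qlt_incr_num_pend_cmds()/qlt_decr_num_pend_cmds() above: an irq-safe spinlock guards a running count plus a high-water mark. The structure and function names below are hypothetical.

#include <linux/spinlock.h>

struct example_stats {
	spinlock_t lock;
	unsigned int pending;
	unsigned int max_pending;	/* high-water mark */
};

static void example_stats_inc(struct example_stats *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pending++;
	if (s->pending > s->max_pending)
		s->max_pending = s->pending;
	spin_unlock_irqrestore(&s->lock, flags);
}

static void example_stats_dec(struct example_stats *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pending--;
	spin_unlock_irqrestore(&s->lock, flags);
}
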
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
@@ -1008,6 +1040,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
"qla_target(%d): Sending 24xx Notify Ack %d\n",
vha->vp_idx, nack->u.isp24.status);
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
}
@@ -1031,7 +1065,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
return;
- resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
if (!resp) {
ql_dbg(ql_dbg_tgt, vha, 0xe04a,
"qla_target(%d): %s failed: unable to allocate "
@@ -1085,6 +1119,8 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
vha->vha_tgt.qla_tgt->abts_resp_expected++;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
}
@@ -1102,7 +1138,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
return;
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe04b,
"qla_target(%d): %s failed: unable to allocate "
@@ -1130,6 +1166,8 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
CTIO7_FLAGS_TERMINATE);
ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
@@ -1178,6 +1216,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+ mcmd->reset_count = vha->hw->chip_reset;
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
abts->exchange_addr_to_abort);
@@ -1300,6 +1339,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
ctio->u.status1.response_len = __constant_cpu_to_le16(8);
ctio->u.status1.sense_data[0] = resp_code;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(ha, ha->req);
}
@@ -1321,6 +1362,21 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
mcmd, mcmd->fc_tm_rsp, mcmd->flags);
spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ ql_dbg(ql_dbg_async, vha, 0xe100,
+ "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), mcmd->reset_count,
+ ha->chip_reset);
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
@@ -1397,8 +1453,6 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
}
}
- ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
- prm->seg_cnt, prm->req_cnt);
return 0;
out_err:
@@ -1431,17 +1485,12 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
uint32_t req_cnt)
{
- struct qla_hw_data *ha = vha->hw;
- device_reg_t __iomem *reg = ha->iobase;
- uint32_t cnt;
+ uint32_t cnt, cnt_in;
if (vha->req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+ cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+ cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
- ql_dbg(ql_dbg_tgt, vha, 0xe00a,
- "Request ring circled: cnt=%d, vha->->ring_index=%d, "
- "vha->req->cnt=%d, req_cnt=%d\n", cnt,
- vha->req->ring_index, vha->req->cnt, req_cnt);
if (vha->req->ring_index < cnt)
vha->req->cnt = cnt - vha->req->ring_index;
else
@@ -1450,11 +1499,10 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
}
if (unlikely(vha->req->cnt < (req_cnt + 2))) {
- ql_dbg(ql_dbg_tgt, vha, 0xe00b,
- "qla_target(%d): There is no room in the "
- "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
- "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
- vha->req->cnt, req_cnt);
+ ql_dbg(ql_dbg_io, vha, 0x305a,
+ "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+ vha->vp_idx, vha->req->ring_index,
+ vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
return -EAGAIN;
}
vha->req->cnt -= req_cnt;
@@ -1491,7 +1539,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
if (h > DEFAULT_OUTSTANDING_COMMANDS)
h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
if (h == ha->tgt.current_handle) {
- ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+ ql_dbg(ql_dbg_io, vha, 0x305b,
"qla_target(%d): Ran out of "
"empty cmd slots in ha %p\n", vha->vp_idx, ha);
h = QLA_TGT_NULL_HANDLE;
@@ -1548,9 +1596,6 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
- ql_dbg(ql_dbg_tgt, vha, 0xe00c,
- "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
- vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
return 0;
}
@@ -1608,14 +1653,6 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
}
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
- ql_dbg(ql_dbg_tgt, vha, 0xe00d,
- "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
- (long long unsigned int)
- pci_dma_hi32(sg_dma_address(prm->sg)),
- (long long unsigned int)
- pci_dma_lo32(sg_dma_address(prm->sg)),
- (int)sg_dma_len(prm->sg));
-
prm->sg = sg_next(prm->sg);
}
}
@@ -1633,11 +1670,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe00e,
- "iocb->scsi_status=%x, iocb->flags=%x\n",
- le16_to_cpu(pkt24->u.status0.scsi_status),
- le16_to_cpu(pkt24->u.status0.flags));
-
pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
/* Setup packet address segment pointer */
@@ -1655,7 +1687,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
}
/* If scatter gather */
- ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
/* Load command entry data segments */
for (cnt = 0;
@@ -1670,14 +1701,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
}
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
- ql_dbg(ql_dbg_tgt, vha, 0xe010,
- "S/G Segment phys_addr=%llx:%llx, len=%d\n",
- (long long unsigned int)pci_dma_hi32(sg_dma_address(
- prm->sg)),
- (long long unsigned int)pci_dma_lo32(sg_dma_address(
- prm->sg)),
- (int)sg_dma_len(prm->sg));
-
prm->sg = sg_next(prm->sg);
}
@@ -1708,6 +1731,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
se_cmd, cmd->tag);
cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->cmd_flags |= BIT_6;
qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
@@ -1715,10 +1739,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
- vha->vp_idx, cmd->tag,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
-
prm->cmd = cmd;
prm->tgt = tgt;
prm->rq_result = scsi_status;
@@ -1729,15 +1749,10 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
prm->req_cnt = 1;
prm->add_status_pkt = 0;
- ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
- prm->rq_result, xmit_type);
-
/* Send marker if required */
if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
return -EFAULT;
- ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
-
if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
if (qlt_pci_map_calc_cnt(prm) != 0)
return -EAGAIN;
@@ -1747,7 +1762,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_tgt, vha, 0xe014,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
"Residual underflow: %d (tag %d, "
"op %x, bufflen %d, rq_result %x)\n", prm->residual,
cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1755,7 +1770,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
prm->rq_result |= SS_RESIDUAL_UNDER;
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_tgt, vha, 0xe015,
+ ql_dbg(ql_dbg_io, vha, 0x305d,
"Residual overflow: %d (tag %d, "
"op %x, bufflen %d, rq_result %x)\n", prm->residual,
cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1778,10 +1793,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
}
}
- ql_dbg(ql_dbg_tgt, vha, 0xe016,
- "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
- prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
-
return 0;
}
@@ -2310,6 +2321,21 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ ql_dbg(ql_dbg_async, vha, 0xe101,
+ "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), cmd->reset_count,
+ ha->chip_reset);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(vha, full_req_cnt);
if (unlikely(res))
@@ -2358,8 +2384,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
struct ctio7_to_24xx *ctio =
(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
- ql_dbg(ql_dbg_tgt, vha, 0xe019,
- "Building additional status packet\n");
+ ql_dbg(ql_dbg_io, vha, 0x305e,
+ "Building additional status packet 0x%p.\n",
+ ctio);
/*
* T10Dif: ctio_crc2_to_fw overlay ontop of
@@ -2391,11 +2418,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+ cmd->cmd_sent_to_fw = 1;
- ql_dbg(ql_dbg_tgt, vha, 0xe01a,
- "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
- pkt, scsi_status);
-
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2430,17 +2456,27 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
return -EIO;
- ql_dbg(ql_dbg_tgt, vha, 0xe01b,
- "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
- __func__, (int)vha->vp_idx, &cmd->se_cmd,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
-
/* Calculate number of entries and segments required */
if (qlt_pci_map_calc_cnt(&prm) != 0)
return -EAGAIN;
spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ cmd->state = QLA_TGT_STATE_NEED_DATA;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ ql_dbg(ql_dbg_async, vha, 0xe102,
+ "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), cmd->reset_count,
+ ha->chip_reset);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(vha, prm.req_cnt);
if (res != 0)
@@ -2460,7 +2496,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
qlt_load_data_segments(&prm, vha);
cmd->state = QLA_TGT_STATE_NEED_DATA;
+ cmd->cmd_sent_to_fw = 1;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2503,7 +2542,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
"iocb(s) %p Returned STATUS.\n", sts);
ql_dbg(ql_dbg_tgt, vha, 0xf075,
- "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
+ "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
@@ -2626,7 +2665,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe050,
"qla_target(%d): %s failed: unable to allocate "
@@ -2669,6 +2708,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
if (ctio24->u.status1.residual != 0)
ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
return ret;
}
@@ -2684,24 +2725,19 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
if (ha_locked) {
rc = __qlt_send_term_exchange(vha, cmd, atio);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, 0, 0);
goto done;
}
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
rc = __qlt_send_term_exchange(vha, cmd, atio);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, 0, 0);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
done:
- /*
- * Terminate exchange will tell fw to release any active CTIO
- * that's in FW posession and cleanup the exchange.
- *
- * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
- * down at FW. Free the cmd later when CTIO comes back later
- * w/aborted(0x2) status.
- *
- * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
- * back w/some err. Free the cmd now.
- */
- if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
+ !cmd->cmd_sent_to_fw)) {
if (!ha_locked && !in_interrupt())
msleep(250); /* just in case */
@@ -2712,6 +2748,53 @@ done:
return;
}
+static void qlt_init_term_exchange(struct scsi_qla_host *vha)
+{
+ struct list_head free_list;
+ struct qla_tgt_cmd *cmd, *tcmd;
+
+ vha->hw->tgt.leak_exchg_thresh_hold =
+ (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
+
+ cmd = tcmd = NULL;
+ if (!list_empty(&vha->hw->tgt.q_full_list)) {
+ INIT_LIST_HEAD(&free_list);
+ list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
+
+ list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+ list_del(&cmd->cmd_list);
+ /* This cmd was never sent to TCM. There is no need
+ * to schedule free or call free_cmd
+ */
+ qlt_free_cmd(cmd);
+ vha->hw->tgt.num_qfull_cmds_alloc--;
+ }
+ }
+ vha->hw->tgt.num_qfull_cmds_dropped = 0;
+}
+
+static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
+{
+ uint32_t total_leaked;
+
+ total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
+
+ if (vha->hw->tgt.leak_exchg_thresh_hold &&
+ (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe079,
+ "Chip reset due to exchange starvation: %d/%d.\n",
+ total_leaked, vha->hw->fw_xcb_count);
+
+ if (IS_P3P_TYPE(vha->hw))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+
+}
+
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt_sess *sess = cmd->sess;
@@ -2721,7 +2804,13 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
__func__, &cmd->se_cmd,
be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ BUG_ON(cmd->cmd_in_wq);
+
+ if (!cmd->q_full)
+ qlt_decr_num_pend_cmds(cmd->vha);
+
BUG_ON(cmd->sg_mapped);
+ cmd->jiffies_at_free = get_jiffies_64();
if (unlikely(cmd->free_sg))
kfree(cmd->sg);
@@ -2729,6 +2818,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
WARN_ON(1);
return;
}
+ cmd->jiffies_at_free = get_jiffies_64();
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -2742,6 +2832,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
struct qla_tgt_srr_imm *imm;
tgt->ctio_srr_id++;
+ cmd->cmd_flags |= BIT_15;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
"qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
@@ -2863,11 +2954,9 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
CTIO_INTERMEDIATE_HANDLE_MARK);
if (handle != QLA_TGT_NULL_HANDLE) {
- if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
- "SKIP_HANDLE CTIO\n");
+ if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
return NULL;
- }
+
/* handle-1 is actually used */
if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
@@ -2894,6 +2983,81 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return cmd;
}
+/* hardware_lock should be held by caller. */
+static void
+qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t handle;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ handle = qlt_make_handle(vha);
+
+ /* TODO: fix debug message type and ids. */
+ if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_io, vha, 0xff00,
+ "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
+ } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+
+ ql_dbg(ql_dbg_io, vha, 0xff01,
+ "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
+
+ ha->tgt.tgt_ops->handle_data(cmd);
+ return;
+ } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_io, vha, 0xff02,
+ "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0xff03,
+ "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
+ cmd->state);
+ dump_stack();
+ }
+
+ cmd->cmd_flags |= BIT_12;
+ ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+void
+qlt_host_reset_handler(struct qla_hw_data *ha)
+{
+ struct qla_tgt_cmd *cmd;
+ unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ scsi_qla_host_t *vha = NULL;
+ struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
+ uint32_t i;
+
+ if (!base_vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || qla_ini_mode_enabled(base_vha)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "Target mode disabled\n");
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
+ "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
+ base_vha->dpc_flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
+ cmd = qlt_get_cmd(base_vha, i);
+ if (!cmd)
+ continue;
+ /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
+ vha = cmd->vha;
+ qlt_abort_cmd_on_host_reset(vha, cmd);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
@@ -2905,10 +3069,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
struct target_core_fabric_ops *tfo;
struct qla_tgt_cmd *cmd;
- ql_dbg(ql_dbg_tgt, vha, 0xe01e,
- "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
- vha->vp_idx, ctio, status, handle);
-
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
/* That could happen only in case of an error/reset/abort */
if (status != CTIO_SUCCESS) {
@@ -2925,6 +3085,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
se_cmd = &cmd->se_cmd;
tfo = se_cmd->se_tfo;
+ cmd->cmd_sent_to_fw = 0;
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
@@ -3011,7 +3172,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
* level.
*/
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
- (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ cmd->cmd_flags |= BIT_13;
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
}
@@ -3019,7 +3181,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+ ;
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
int rx_status = 0;
@@ -3030,10 +3192,6 @@ skip_term:
else
cmd->write_data_transferred = 1;
- ql_dbg(ql_dbg_tgt, vha, 0xe020,
- "Data received, context %x, rx_status %d\n",
- 0x0, rx_status);
-
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
@@ -3051,6 +3209,7 @@ skip_term:
dump_stack();
}
+
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3103,6 +3262,8 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
uint32_t data_length;
int ret, fcp_task_attr, data_dir, bidi = 0;
+ cmd->cmd_in_wq = 0;
+ cmd->cmd_flags |= BIT_1;
if (tgt->tgt_stop)
goto out_term;
@@ -3128,11 +3289,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
&atio->u.isp24.fcp_cmnd.add_cdb[
atio->u.isp24.fcp_cmnd.add_cdb_len]));
- ql_dbg(ql_dbg_tgt, vha, 0xe022,
- "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
- cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
- cmd->atio.u.isp24.fcp_hdr.ox_id);
-
ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
fcp_task_attr, data_dir, bidi);
if (ret != 0)
@@ -3146,13 +3302,16 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
return;
out_term:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+ ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
/*
* cmd has not sent to target yet, so pass NULL as the second
* argument to qlt_send_term_exchange() and free the memory here.
*/
+ cmd->cmd_flags |= BIT_2;
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+
+ qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3183,6 +3342,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
+ qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
@@ -3264,7 +3424,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd;
if (unlikely(tgt->tgt_stop)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while device %p is shutting down\n", tgt);
return -EFAULT;
}
@@ -3277,6 +3437,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
memcpy(&op->atio, atio, sizeof(*atio));
+ op->vha = vha;
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
return 0;
@@ -3288,12 +3449,19 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+ ql_dbg(ql_dbg_io, vha, 0x3062,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
ha->tgt.tgt_ops->put_sess(sess);
return -ENOMEM;
}
+ cmd->cmd_flags = 0;
+ cmd->jiffies_at_alloc = get_jiffies_64();
+
+ cmd->reset_count = vha->hw->chip_reset;
+
+ cmd->cmd_in_wq = 1;
+ cmd->cmd_flags |= BIT_0;
INIT_WORK(&cmd->work, qlt_do_work);
queue_work(qla_tgt_wq, &cmd->work);
return 0;
@@ -3327,6 +3495,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
}
mcmd->tmr_func = fn;
mcmd->flags = flags;
+ mcmd->reset_count = vha->hw->chip_reset;
switch (fn) {
case QLA_TGT_CLEAR_ACA:
@@ -3462,6 +3631,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
lun = a->u.isp24.fcp_cmnd.lun;
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ mcmd->reset_count = vha->hw->chip_reset;
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
le16_to_cpu(iocb->u.isp2x.seq_id));
@@ -3753,8 +3923,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
qlt_send_notify_ack(vha, ntfy,
0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (xmit_type & QLA_TGT_XMIT_DATA)
+ if (xmit_type & QLA_TGT_XMIT_DATA) {
+ cmd->cmd_flags |= BIT_8;
qlt_rdy_to_xfer(cmd);
+ }
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
"qla_target(%d): SRR for out data for cmd "
@@ -3772,8 +3944,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
}
/* Transmit response in case of status and data-in cases */
- if (resp)
+ if (resp) {
+ cmd->cmd_flags |= BIT_7;
qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+ }
return;
@@ -3786,8 +3960,10 @@ out_reject:
if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->state = QLA_TGT_STATE_DATA_IN;
dump_stack();
- } else
+ } else {
+ cmd->cmd_flags |= BIT_9;
qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ }
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -3901,7 +4077,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
tgt->imm_srr_id++;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+ ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
vha->vp_idx);
imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
@@ -4121,7 +4297,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
* This function sends busy to ISP 2xxx or 24xx.
*/
-static void qlt_send_busy(struct scsi_qla_host *vha,
+static int __qlt_send_busy(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status)
{
struct ctio7_to_24xx *ctio24;
@@ -4133,16 +4309,16 @@ static void qlt_send_busy(struct scsi_qla_host *vha,
atio->u.isp24.fcp_hdr.s_id);
if (!sess) {
qlt_send_term_exchange(vha, NULL, atio, 1);
- return;
+ return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
if (!pkt) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+ ql_dbg(ql_dbg_io, vha, 0x3063,
"qla_target(%d): %s failed: unable to allocate "
"request packet", vha->vp_idx, __func__);
- return;
+ return -ENOMEM;
}
pkt->entry_count = 1;
@@ -4167,13 +4343,192 @@ static void qlt_send_busy(struct scsi_qla_host *vha,
*/
ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.scsi_status = cpu_to_le16(status);
- ctio24->u.status1.residual = get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[
- atio->u.isp24.fcp_cmnd.add_cdb_len]);
- if (ctio24->u.status1.residual != 0)
- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
-
+ /* Memory Barrier */
+ wmb();
qla2x00_start_iocbs(vha, vha->req);
+ return 0;
+}
+
+/*
+ * This routine is used to allocate a command for either a QFull condition
+ * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
+ * out previously.
+ */
+static void
+qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status, int qfull)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ struct se_session *se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag;
+
+ if (unlikely(tgt->tgt_stop)) {
+ ql_dbg(ql_dbg_io, vha, 0x300a,
+ "New command while device %p is shutting down\n", tgt);
+ return;
+ }
+
+ if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
+ vha->hw->tgt.num_qfull_cmds_dropped++;
+ if (vha->hw->tgt.num_qfull_cmds_dropped >
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->hw->tgt.num_qfull_cmds_dropped;
+
+ ql_dbg(ql_dbg_io, vha, 0x3068,
+ "qla_target(%d): %s: QFull CMD dropped[%d]\n",
+ vha->vp_idx, __func__,
+ vha->hw->tgt.num_qfull_cmds_dropped);
+
+ qlt_chk_exch_leak_thresh_hold(vha);
+ return;
+ }
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id
+ (vha, atio->u.isp24.fcp_hdr.s_id);
+ if (!sess)
+ return;
+
+ se_sess = sess->se_sess;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0)
+ return;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ if (!cmd) {
+ ql_dbg(ql_dbg_io, vha, 0x3009,
+ "qla_target(%d): %s: Allocation of cmd failed\n",
+ vha->vp_idx, __func__);
+
+ vha->hw->tgt.num_qfull_cmds_dropped++;
+ if (vha->hw->tgt.num_qfull_cmds_dropped >
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->hw->tgt.num_qfull_cmds_dropped;
+
+ qlt_chk_exch_leak_thresh_hold(vha);
+ return;
+ }
+
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+ qlt_incr_num_pend_cmds(vha);
+ INIT_LIST_HEAD(&cmd->cmd_list);
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+
+ cmd->tgt = vha->vha_tgt.qla_tgt;
+ cmd->vha = vha;
+ cmd->reset_count = vha->hw->chip_reset;
+ cmd->q_full = 1;
+
+ if (qfull) {
+ cmd->q_full = 1;
+ /* NOTE: borrowing the state field to carry the status */
+ cmd->state = status;
+ } else
+ cmd->term_exchg = 1;
+
+ list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
+
+ vha->hw->tgt.num_qfull_cmds_alloc++;
+ if (vha->hw->tgt.num_qfull_cmds_alloc >
+ vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
+ vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
+ vha->hw->tgt.num_qfull_cmds_alloc;
+}
+
+int
+qlt_free_qfull_cmds(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ struct qla_tgt_cmd *cmd, *tcmd;
+ struct list_head free_list;
+ int rc = 0;
+
+ if (list_empty(&ha->tgt.q_full_list))
+ return 0;
+
+ INIT_LIST_HEAD(&free_list);
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+ if (list_empty(&ha->tgt.q_full_list)) {
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ return 0;
+ }
+
+ list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
+ if (cmd->q_full)
+ /* cmd->state is a borrowed field to hold status */
+ rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
+ else if (cmd->term_exchg)
+ rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
+
+ if (rc == -ENOMEM)
+ break;
+
+ if (cmd->q_full)
+ ql_dbg(ql_dbg_io, vha, 0x3006,
+ "%s: busy sent for ox_id[%04x]\n", __func__,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ else if (cmd->term_exchg)
+ ql_dbg(ql_dbg_io, vha, 0x3007,
+ "%s: Term exchg sent for ox_id[%04x]\n", __func__,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ else
+ ql_dbg(ql_dbg_io, vha, 0x3008,
+ "%s: Unexpected cmd in QFull list %p\n", __func__,
+ cmd);
+
+ list_del(&cmd->cmd_list);
+ list_add_tail(&cmd->cmd_list, &free_list);
+
+ /* piggy back on hardware_lock for protection */
+ vha->hw->tgt.num_qfull_cmds_alloc--;
+ }
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ cmd = NULL;
+
+ list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+ list_del(&cmd->cmd_list);
+ /* This cmd was never sent to TCM. There is no need
+ * to schedule free or call free_cmd
+ */
+ qlt_free_cmd(cmd);
+ }
+ return rc;
+}
+
+static void
+qlt_send_busy(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status)
+{
+ int rc = 0;
+
+ rc = __qlt_send_busy(vha, atio, status);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, status, 1);
+}
+
+static int
+qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t status;
+
+ if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
+ return 0;
+
+ status = temp_sam_status;
+ qlt_send_busy(vha, atio, status);
+ return 1;
}
/* ha->hardware_lock supposed to be held on entry */
@@ -4186,14 +4541,10 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
int rc;
if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+ ql_dbg(ql_dbg_io, vha, 0x3064,
"ATIO pkt, but no tgt (ha %p)", ha);
return;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe02c,
- "qla_target(%d): ATIO pkt %p: type %02x count %02x",
- vha->vp_idx, atio, atio->u.raw.entry_type,
- atio->u.raw.entry_count);
/*
* In tgt_stop mode we should also allow all requests to pass.
* Otherwise, some commands can get stuck.
@@ -4203,33 +4554,28 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
- ql_dbg(ql_dbg_tgt, vha, 0xe02d,
- "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
- vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
- atio->u.isp24.fcp_cmnd.rddata,
- atio->u.isp24.fcp_cmnd.wrdata,
- atio->u.isp24.fcp_cmnd.cdb[0],
- atio->u.isp24.fcp_cmnd.add_cdb_len,
- be32_to_cpu(get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[
- atio->u.isp24.fcp_cmnd.add_cdb_len])),
- atio->u.isp24.fcp_hdr.s_id[0],
- atio->u.isp24.fcp_hdr.s_id[1],
- atio->u.isp24.fcp_hdr.s_id[2]);
-
if (unlikely(atio->u.isp24.exchange_addr ==
ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe058,
+ ql_dbg(ql_dbg_io, vha, 0x3065,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
"sending QUEUE_FULL\n", vha->vp_idx);
qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
break;
}
- if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+
+
+
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
+ rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+ }
rc = qlt_handle_cmd_for_atio(vha, atio);
- else
+ } else {
rc = qlt_handle_task_mgmt(vha, atio);
+ }
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
@@ -4293,11 +4639,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
return;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe02f,
- "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
- "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
- pkt->entry_count, pkt->entry_status, pkt->handle);
-
/*
* In tgt_stop mode we should also allow all requests to pass.
* Otherwise, some commands can get stuck.
@@ -4310,9 +4651,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe030,
- "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
- entry->entry_type, vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4323,15 +4661,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
int rc;
- ql_dbg(ql_dbg_tgt, vha, 0xe031,
- "ACCEPT_TGT_IO instance %d status %04x "
- "lun %04x read/write %d data_length %04x "
- "target_id %02x rx_id %04x\n ", vha->vp_idx,
- le16_to_cpu(atio->u.isp2x.status),
- le16_to_cpu(atio->u.isp2x.lun),
- atio->u.isp2x.execution_codes,
- le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
- atio), atio->u.isp2x.rx_id);
if (atio->u.isp2x.status !=
__constant_cpu_to_le16(ATIO_CDB_VALID)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05e,
@@ -4340,10 +4669,12 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
le16_to_cpu(atio->u.isp2x.status));
break;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe032,
- "FCP CDB: 0x%02x, sizeof(cdb): %lu",
- atio->u.isp2x.cdb[0], (unsigned long
- int)sizeof(atio->u.isp2x.cdb));
+
+ rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+ }
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
@@ -4376,8 +4707,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CONTINUE_TGT_IO_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe033,
- "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4387,8 +4716,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CTIO_A64_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
- vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4492,11 +4819,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int login_code;
- ql_dbg(ql_dbg_tgt, vha, 0xe039,
- "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
- vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
- ha->operating_mode, ha->current_topology);
-
if (!ha->tgt.tgt_ops)
return;
@@ -4573,11 +4895,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
break;
default:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
- "qla_target(%d): Async event %#x occurred: "
- "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
break;
}
@@ -4598,8 +4915,6 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
return NULL;
}
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
-
fcport->loop_id = loop_id;
rc = qla2x00_get_port_database(vha, fcport, 0);
@@ -4898,6 +5213,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
qlt_release(vha->vha_tgt.qla_tgt);
return 0;
}
+
+ /* free left over qfull cmds */
+ qlt_init_term_exchange(vha);
+
mutex_lock(&qla_tgt_mutex);
list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex);
@@ -5295,8 +5614,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
/* Enable initial LIP */
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
- /* Enable FC tapes support */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ if (ql2xtgt_tape_enable)
+ /* Enable FC Tape support */
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ else
+ /* Disable FC Tape support */
+ nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
/* Disable Full Login after LIP */
nv->host_p &= __constant_cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
@@ -5378,8 +5702,13 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
/* Enable initial LIP */
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
- /* Enable FC tapes support */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ if (ql2xtgt_tape_enable)
+ /* Enable FC tape support */
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ else
+ /* Disable FC tape support */
+ nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
/* Disable Full Login after LIP */
nv->host_p &= __constant_cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d1d24fb0160a..8ff330f7d6f5 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -915,6 +915,10 @@ struct qla_tgt_cmd {
unsigned int aborted:1; /* Needed in case of SRR */
unsigned int write_data_transferred:1;
unsigned int ctx_dsd_alloced:1;
+ unsigned int q_full:1;
+ unsigned int term_exchg:1;
+ unsigned int cmd_sent_to_fw:1;
+ unsigned int cmd_in_wq:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -923,10 +927,12 @@ struct qla_tgt_cmd {
uint32_t tag;
uint32_t unpacked_lun;
enum dma_data_direction dma_data_direction;
+ uint32_t reset_count;
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
+ struct list_head cmd_list;
struct atio_from_isp atio;
/* t10dif */
@@ -935,6 +941,29 @@ struct qla_tgt_cmd {
uint32_t blk_sz;
struct crc_context *ctx;
+ uint64_t jiffies_at_alloc;
+ uint64_t jiffies_at_free;
+ /* BIT_0 - ATIO arrival / schedule to work
+ * BIT_1 - qlt_do_work
+ * BIT_2 - qlt_do_work failed
+ * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+ * BIT_4 - read response/tcm_qla2xxx_queue_data_in
+ * BIT_5 - status response / tcm_qla2xxx_queue_status
+ * BIT_6 - tcm request to abort/Term exchange.
+ * pre_xmit_response->qlt_send_term_exchange
+ * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+ * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+ * BIT_9 - SRR received (qlt_handle_srr->qlt_send_term_exchange)
+ * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
+ * BIT_11 - Data actually going to TCM : tcm_qla2xxx_handle_data_work
+ * BIT_12 - good completion - qlt_do_ctio_completion --> free_cmd
+ * BIT_13 - Bad completion -
+ * qlt_do_ctio_completion --> qlt_term_ctio_exchange
+ * BIT_14 - Back end data received/sent.
+ * BIT_15 - SRR prepare ctio
+ * BIT_16 - complete free
+ */
+ uint32_t cmd_flags;
};
struct qla_tgt_sess_work_param {
@@ -958,6 +987,7 @@ struct qla_tgt_mgmt_cmd {
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
+ uint32_t reset_count;
#define QLA24XX_MGMT_SEND_NACK 1
union {
struct atio_from_isp atio;
@@ -1089,5 +1119,6 @@ extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
#endif /* __QLA_TARGET_H */
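Illustrative aside, not part of the patch: the cmd_flags bits documented in struct qla_tgt_cmd above act as a per-command lifecycle trace. Below is a minimal userspace sketch of how such a bitmask could be decoded when reading a crash dump or trace; the bit meanings follow the comment block, while the helper, its names and the sample value are hypothetical.

/*
 * Illustrative only, not part of the patch: decodes the cmd_flags trace bits
 * documented in struct qla_tgt_cmd. The helper and sample value are made up.
 */
#include <stdio.h>
#include <stdint.h>

static const char *const qlt_cmd_flag_names[] = {
	[0]  = "ATIO arrival / scheduled to work",
	[1]  = "qlt_do_work",
	[2]  = "qlt_do_work failed",
	[3]  = "xfer rdy / write_pending",
	[4]  = "queue_data_in",
	[5]  = "queue_status",
	[6]  = "term exchange on abort",
	[7]  = "SRR -> xmit_response",
	[8]  = "SRR -> rdy_to_xfer",
	[9]  = "SRR -> term exchange",
	[10] = "data-in handed to handle_data",
	[11] = "data delivered to TCM",
	[12] = "good CTIO completion -> free_cmd",
	[13] = "bad CTIO completion -> term exchange",
	[14] = "back-end data received/sent",
	[15] = "SRR prepare CTIO",
	[16] = "complete free",
};

static void qlt_decode_cmd_flags(uint32_t cmd_flags)
{
	unsigned int bit;

	for (bit = 0; bit < 17; bit++)
		if (cmd_flags & (1u << bit))
			printf("BIT_%u: %s\n", bit, qlt_cmd_flag_names[bit]);
}

int main(void)
{
	/* A command that arrived, executed and was freed cleanly. */
	qlt_decode_cmd_flags((1u << 0) | (1u << 1) | (1u << 12) | (1u << 16));
	return 0;
}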
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index cb9a0c4bc419..a8c0c7362e48 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -128,18 +128,10 @@ qla27xx_insert32(uint32_t value, void *buf, ulong *len)
static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
- ulong cnt = size;
- if (buf && mem) {
+ if (buf && mem && size) {
buf += *len;
- while (cnt >= sizeof(uint32_t)) {
- *(__le32 *)buf = cpu_to_le32p(mem);
- buf += sizeof(uint32_t);
- mem += sizeof(uint32_t);
- cnt -= sizeof(uint32_t);
- }
- if (cnt)
- memcpy(buf, mem, cnt);
+ memcpy(buf, mem, size);
}
*len += size;
}
@@ -151,8 +143,6 @@ qla27xx_read8(void *window, void *buf, ulong *len)
if (buf) {
value = RD_REG_BYTE((__iomem void *)window);
- ql_dbg(ql_dbg_misc, NULL, 0xd011,
- "%s: -> %x\n", __func__, value);
}
qla27xx_insert32(value, buf, len);
}
@@ -164,8 +154,6 @@ qla27xx_read16(void *window, void *buf, ulong *len)
if (buf) {
value = RD_REG_WORD((__iomem void *)window);
- ql_dbg(ql_dbg_misc, NULL, 0xd012,
- "%s: -> %x\n", __func__, value);
}
qla27xx_insert32(value, buf, len);
}
@@ -177,8 +165,6 @@ qla27xx_read32(void *window, void *buf, ulong *len)
if (buf) {
value = RD_REG_DWORD((__iomem void *)window);
- ql_dbg(ql_dbg_misc, NULL, 0xd013,
- "%s: -> %x\n", __func__, value);
}
qla27xx_insert32(value, buf, len);
}
@@ -197,10 +183,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
{
void *window = (void *)reg + offset;
- if (buf) {
- ql_dbg(ql_dbg_misc, NULL, 0xd014,
- "%s: @%x\n", __func__, offset);
- }
qla27xx_read32(window, buf, len);
}
@@ -211,8 +193,6 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
__iomem void *window = reg + offset;
if (buf) {
- ql_dbg(ql_dbg_misc, NULL, 0xd015,
- "%s: @%x <- %x\n", __func__, offset, data);
WRT_REG_DWORD(window, data);
}
}
@@ -225,11 +205,6 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
void *window = (void *)reg + offset;
void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
- if (buf) {
- ql_dbg(ql_dbg_misc, NULL, 0xd016,
- "%s: base=%x offset=%x count=%x width=%x\n",
- __func__, addr, offset, count, width);
- }
qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
while (count--) {
qla27xx_insert32(addr, buf, len);
@@ -380,14 +355,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ent->t262.start_addr = start;
ent->t262.end_addr = end;
}
- } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
- ql_dbg(ql_dbg_misc, vha, 0xd021,
- "%s: unsupported ddr ram\n", __func__);
- qla27xx_skip_entry(ent, buf);
- goto done;
} else {
ql_dbg(ql_dbg_misc, vha, 0xd022,
- "%s: unknown area %u\n", __func__, ent->t262.ram_area);
+ "%s: unknown area %x\n", __func__, ent->t262.ram_area);
qla27xx_skip_entry(ent, buf);
goto done;
}
@@ -402,8 +372,6 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
dwords = end - start + 1;
if (buf) {
- ql_dbg(ql_dbg_misc, vha, 0xd024,
- "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
buf += *len;
qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
}
@@ -448,13 +416,9 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
count++;
}
}
- } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
- ql_dbg(ql_dbg_misc, vha, 0xd025,
- "%s: unsupported atio queue\n", __func__);
- qla27xx_skip_entry(ent, buf);
} else {
ql_dbg(ql_dbg_misc, vha, 0xd026,
- "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
+ "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
qla27xx_skip_entry(ent, buf);
}
@@ -549,17 +513,9 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
"%s: missing eft\n", __func__);
qla27xx_skip_entry(ent, buf);
}
- } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
- ql_dbg(ql_dbg_misc, vha, 0xd029,
- "%s: unsupported exchange offload buffer\n", __func__);
- qla27xx_skip_entry(ent, buf);
- } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
- ql_dbg(ql_dbg_misc, vha, 0xd02a,
- "%s: unsupported extended login buffer\n", __func__);
- qla27xx_skip_entry(ent, buf);
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02b,
- "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
+ "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
qla27xx_skip_entry(ent, buf);
}
@@ -695,13 +651,9 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
count++;
}
}
- } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
- ql_dbg(ql_dbg_misc, vha, 0xd02e,
- "%s: unsupported atio queue\n", __func__);
- qla27xx_skip_entry(ent, buf);
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02f,
- "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
+ "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
qla27xx_skip_entry(ent, buf);
}
@@ -715,6 +667,32 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
}
static int
+qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong offset = offsetof(typeof(*ent), t275.buffer);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd213,
+ "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
+ if (!ent->t275.length) {
+ ql_dbg(ql_dbg_misc, vha, 0xd020,
+ "%s: buffer zero length\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+ if (offset + ent->t275.length > ent->hdr.entry_size) {
+ ql_dbg(ql_dbg_misc, vha, 0xd030,
+ "%s: buffer overflow\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+done:
+ return false;
+}
+
+static int
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -726,7 +704,7 @@ qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
}
struct qla27xx_fwdt_entry_call {
- int type;
+ uint type;
int (*call)(
struct scsi_qla_host *,
struct qla27xx_fwdt_entry *,
@@ -756,18 +734,21 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
{ ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
{ ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
{ ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
+ { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } ,
{ -1 , qla27xx_fwdt_entry_other }
};
-static inline int (*qla27xx_find_entry(int type))
+static inline int (*qla27xx_find_entry(uint type))
(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
{
struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
- while (list->type != -1 && list->type != type)
+ while (list->type < type)
list++;
- return list->call;
+ if (list->type == type)
+ return list->call;
+ return qla27xx_fwdt_entry_other;
}
static inline void *
@@ -792,6 +773,15 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
break;
ent = qla27xx_next_entry(ent);
}
+
+ if (count)
+ ql_dbg(ql_dbg_misc, vha, 0xd018,
+ "%s: residual count (%lx)\n", __func__, count);
+
+ if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
+ ql_dbg(ql_dbg_misc, vha, 0xd019,
+ "%s: missing end (%lx)\n", __func__, count);
+
ql_dbg(ql_dbg_misc, vha, 0xd01b,
"%s: len=%lx\n", __func__, *len);
}
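Illustrative aside, not part of the patch: the reworked qla27xx_find_entry() above stops scanning as soon as list->type is no longer below the requested type, so ql27xx_fwdt_entry_call_list has to stay sorted by ascending entry type, with the catch-all sentinel (-1, which reads as UINT_MAX now that the field is unsigned) last. A standalone sketch of the same lookup pattern, with made-up types and handlers:

/*
 * Illustrative only, not part of the patch: the lookup relies on the call
 * list being sorted by ascending type with a catch-all sentinel at the end.
 */
#include <stdio.h>

typedef int (*fwdt_call_t)(unsigned int type);

struct entry_call {
	unsigned int type;
	fwdt_call_t call;
};

static int handle_known(unsigned int type) { printf("known %u\n", type); return 0; }
static int handle_other(unsigned int type) { printf("other %u\n", type); return 0; }

/* Must stay sorted by ascending type; ~0u plays the role of the -1 sentinel. */
static struct entry_call call_list[] = {
	{ 255, handle_known },
	{ 262, handle_known },
	{ 275, handle_known },
	{ ~0u, handle_other },
};

static fwdt_call_t find_entry(unsigned int type)
{
	struct entry_call *list = call_list;

	/* Skip smaller types, then check for an exact match. */
	while (list->type < type)
		list++;
	if (list->type == type)
		return list->call;
	return handle_other;
}

int main(void)
{
	find_entry(262)(262);	/* exact match */
	find_entry(300)(300);	/* unknown -> catch-all */
	return 0;
}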
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 1967424c8e64..141c1c5e73f4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -53,6 +53,7 @@ struct __packed qla27xx_fwdt_template {
#define ENTRY_TYPE_RDREMRAM 272
#define ENTRY_TYPE_PCICFG 273
#define ENTRY_TYPE_GET_SHADOW 274
+#define ENTRY_TYPE_WRITE_BUF 275
#define CAPTURE_FLAG_PHYS_ONLY BIT_0
#define CAPTURE_FLAG_PHYS_VIRT BIT_1
@@ -193,6 +194,11 @@ struct __packed qla27xx_fwdt_entry {
uint8_t queue_type;
uint8_t reserved[3];
} t274;
+
+ struct __packed {
+ uint32_t length;
+ uint8_t buffer[];
+ } t275;
};
};
@@ -208,6 +214,8 @@ struct __packed qla27xx_fwdt_entry {
#define T268_BUF_TYPE_EXTD_TRACE 1
#define T268_BUF_TYPE_EXCH_BUFOFF 2
#define T268_BUF_TYPE_EXTD_LOGIN 3
+#define T268_BUF_TYPE_REQ_MIRROR 4
+#define T268_BUF_TYPE_RSP_MIRROR 5
#define T274_QUEUE_TYPE_REQ_SHAD 1
#define T274_QUEUE_TYPE_RSP_SHAD 2
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4d2c98cbec4f..d88b86214ec5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.08-k"
+#define QLA2XXX_VERSION "8.07.00.16-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e2beab962096..031b2961c6b7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -50,8 +50,12 @@
#include "qla_target.h"
#include "tcm_qla2xxx.h"
-struct workqueue_struct *tcm_qla2xxx_free_wq;
-struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+static struct workqueue_struct *tcm_qla2xxx_free_wq;
+static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
/*
* Parse WWN.
@@ -386,6 +390,11 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ cmd->cmd_in_wq = 0;
+
+ WARN_ON(cmd->cmd_flags & BIT_16);
+
+ cmd->cmd_flags |= BIT_16;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -396,6 +405,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
+ cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
@@ -405,6 +415,13 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
*/
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
+ struct qla_tgt_cmd *cmd;
+
+ if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ cmd->cmd_flags |= BIT_14;
+ }
+
return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
@@ -511,8 +528,13 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
{
- struct qla_tgt_cmd *cmd = container_of(se_cmd,
- struct qla_tgt_cmd, se_cmd);
+ struct qla_tgt_cmd *cmd;
+
+ /* check for task mgmt cmd */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ return 0xffffffff;
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
return cmd->tag;
}
@@ -562,6 +584,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
+ cmd->cmd_in_wq = 0;
+ cmd->cmd_flags |= BIT_11;
if (!cmd->write_data_transferred) {
/*
* Check if se_cmd has already been aborted via LUN_RESET, and
@@ -590,6 +614,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
+ cmd->cmd_flags |= BIT_10;
+ cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
@@ -633,6 +659,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+ cmd->cmd_flags |= BIT_4;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
@@ -640,6 +667,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
cmd->offset = 0;
+ cmd->cmd_flags |= BIT_3;
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
cmd->prot_sg = se_cmd->t_prot_sg;
@@ -665,6 +693,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+ if (cmd->cmd_flags & BIT_5) {
+ pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+ dump_stack();
+ }
+ cmd->cmd_flags |= BIT_5;
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
/*
@@ -734,10 +767,6 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
cmd->sg_mapped = 0;
}
-/* Local pointer to allocated TCM configfs fabric module */
-struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
-struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
-
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d81f3cc43ff1..79c77b485a67 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -670,14 +670,10 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- /*
- * If SCSI-2 or lower, store the LUN value in cmnd.
- */
- if (cmd->device->scsi_level <= SCSI_2 &&
- cmd->device->scsi_level != SCSI_UNKNOWN) {
+ /* Store the LUN value in cmnd, if needed. */
+ if (cmd->device->lun_in_cdb)
cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
(cmd->device->lun << 5 & 0xe0);
- }
scsi_log_send(cmd);
@@ -1371,7 +1367,11 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
+#ifdef CONFIG_SCSI_MQ_DEFAULT
+bool scsi_use_blk_mq = true;
+#else
bool scsi_use_blk_mq = false;
+#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
static int __init init_scsi(void)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d19c0e3c7f48..2b6d447ad6d6 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -123,7 +123,7 @@ static const char *scsi_debug_version_date = "20140706";
#define DEF_PHYSBLK_EXP 0
#define DEF_PTYPE 0
#define DEF_REMOVABLE false
-#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
+#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
#define DEF_SECTOR_SIZE 512
#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
#define DEF_UNMAP_ALIGNMENT 0
@@ -929,7 +929,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
{
unsigned char pq_pdt;
unsigned char * arr;
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
int alloc_len, n, ret;
alloc_len = (cmd[3] << 8) + cmd[4];
@@ -1056,15 +1056,15 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
memcpy(&arr[16], inq_product_id, 16);
memcpy(&arr[32], inq_product_rev, 4);
/* version descriptors (2 bytes each) follow */
- arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
- arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
+ arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
+ arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
n = 62;
if (scsi_debug_ptype == 0) {
- arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
+ arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
} else if (scsi_debug_ptype == 1) {
- arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
+ arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
}
- arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
+ arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
ret = fill_from_dev_buffer(scp, arr,
min(alloc_len, SDEBUG_LONG_INQ_SZ));
kfree(arr);
@@ -1075,7 +1075,7 @@ static int resp_requests(struct scsi_cmnd * scp,
struct sdebug_dev_info * devip)
{
unsigned char * sbuff;
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
unsigned char arr[SCSI_SENSE_BUFFERSIZE];
int want_dsense;
int len = 18;
@@ -1115,7 +1115,7 @@ static int resp_requests(struct scsi_cmnd * scp,
static int resp_start_stop(struct scsi_cmnd * scp,
struct sdebug_dev_info * devip)
{
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
int power_cond, errsts, start;
errsts = check_readiness(scp, UAS_ONLY, devip);
@@ -1177,7 +1177,7 @@ static int resp_readcap(struct scsi_cmnd * scp,
static int resp_readcap16(struct scsi_cmnd * scp,
struct sdebug_dev_info * devip)
{
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
unsigned long long capac;
int errsts, k, alloc_len;
@@ -1222,7 +1222,7 @@ static int resp_readcap16(struct scsi_cmnd * scp,
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
struct sdebug_dev_info * devip)
{
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
unsigned char * arr;
int host_no = devip->sdbg_host->shost->host_no;
int n, ret, alen, rlen;
@@ -1468,7 +1468,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
unsigned char * ap;
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
errsts = check_readiness(scp, UAS_ONLY, devip);
if (errsts)
@@ -1630,7 +1630,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
int param_len, res, errsts, mpage;
unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
errsts = check_readiness(scp, UAS_ONLY, devip);
if (errsts)
@@ -1739,7 +1739,7 @@ static int resp_log_sense(struct scsi_cmnd * scp,
{
int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
errsts = check_readiness(scp, UAS_ONLY, devip);
if (errsts)
@@ -2414,7 +2414,7 @@ static int resp_report_luns(struct scsi_cmnd * scp,
unsigned int alloc_len;
int lun_cnt, i, upper, num, n;
u64 wlun, lun;
- unsigned char *cmd = (unsigned char *)scp->cmnd;
+ unsigned char *cmd = scp->cmnd;
int select_report = (int)cmd[2];
struct scsi_lun *one_lun;
unsigned char arr[SDEBUG_RLUN_ARR_SZ];
@@ -2743,6 +2743,13 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
if (test_bit(k, queued_in_use_bm)) {
sqcp = &queued_arr[k];
if (cmnd == sqcp->a_cmnd) {
+ devip = (struct sdebug_dev_info *)
+ cmnd->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ sqcp->a_cmnd = NULL;
+ spin_unlock_irqrestore(&queued_arr_lock,
+ iflags);
if (scsi_debug_ndelay > 0) {
if (sqcp->sd_hrtp)
hrtimer_cancel(
@@ -2755,18 +2762,13 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
if (sqcp->tletp)
tasklet_kill(sqcp->tletp);
}
- __clear_bit(k, queued_in_use_bm);
- devip = (struct sdebug_dev_info *)
- cmnd->device->hostdata;
- if (devip)
- atomic_dec(&devip->num_in_q);
- sqcp->a_cmnd = NULL;
- break;
+ clear_bit(k, queued_in_use_bm);
+ return 1;
}
}
}
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- return (k < qmax) ? 1 : 0;
+ return 0;
}
/* Deletes (stops) timers or tasklets of all queued commands */
@@ -2782,6 +2784,13 @@ static void stop_all_queued(void)
if (test_bit(k, queued_in_use_bm)) {
sqcp = &queued_arr[k];
if (sqcp->a_cmnd) {
+ devip = (struct sdebug_dev_info *)
+ sqcp->a_cmnd->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ sqcp->a_cmnd = NULL;
+ spin_unlock_irqrestore(&queued_arr_lock,
+ iflags);
if (scsi_debug_ndelay > 0) {
if (sqcp->sd_hrtp)
hrtimer_cancel(
@@ -2794,12 +2803,8 @@ static void stop_all_queued(void)
if (sqcp->tletp)
tasklet_kill(sqcp->tletp);
}
- __clear_bit(k, queued_in_use_bm);
- devip = (struct sdebug_dev_info *)
- sqcp->a_cmnd->device->hostdata;
- if (devip)
- atomic_dec(&devip->num_in_q);
- sqcp->a_cmnd = NULL;
+ clear_bit(k, queued_in_use_bm);
+ spin_lock_irqsave(&queued_arr_lock, iflags);
}
}
}
@@ -3006,7 +3011,7 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int scsi_result, int delta_jiff)
{
unsigned long iflags;
- int k, num_in_q, tsf, qdepth, inject;
+ int k, num_in_q, qdepth, inject;
struct sdebug_queued_cmd *sqcp = NULL;
struct scsi_device *sdp = cmnd->device;
@@ -3019,55 +3024,48 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
__func__, scsi_result);
- if (delta_jiff == 0) {
- /* using same thread to call back mid-layer */
- cmnd->result = scsi_result;
- cmnd->scsi_done(cmnd);
- return 0;
- }
+ if (delta_jiff == 0)
+ goto respond_in_thread;
- /* deferred response cases */
+ /* schedule the response at a later time if resources permit */
spin_lock_irqsave(&queued_arr_lock, iflags);
num_in_q = atomic_read(&devip->num_in_q);
qdepth = cmnd->device->queue_depth;
- k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
- tsf = 0;
inject = 0;
- if ((qdepth > 0) && (num_in_q >= qdepth))
- tsf = 1;
- else if ((scsi_debug_every_nth != 0) &&
- (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) {
+ if ((qdepth > 0) && (num_in_q >= qdepth)) {
+ if (scsi_result) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ goto respond_in_thread;
+ } else
+ scsi_result = device_qfull_result;
+ } else if ((scsi_debug_every_nth != 0) &&
+ (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
+ (scsi_result == 0)) {
if ((num_in_q == (qdepth - 1)) &&
(atomic_inc_return(&sdebug_a_tsf) >=
abs(scsi_debug_every_nth))) {
atomic_set(&sdebug_a_tsf, 0);
inject = 1;
- tsf = 1;
+ scsi_result = device_qfull_result;
}
}
- /* if (tsf) simulate device reporting SCSI status of TASK SET FULL.
- * Might override existing CHECK CONDITION. */
- if (tsf)
- scsi_result = device_qfull_result;
+ k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
if (k >= scsi_debug_max_queue) {
- if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
- tsf = 1;
spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (scsi_result)
+ goto respond_in_thread;
+ else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
+ scsi_result = device_qfull_result;
if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
sdev_printk(KERN_INFO, sdp,
- "%s: num_in_q=%d, bypass q, %s%s\n",
- __func__, num_in_q,
- (inject ? "<inject> " : ""),
- (tsf ? "status: TASK SET FULL" :
- "report: host busy"));
- if (tsf) {
- /* queued_arr full so respond in same thread */
- cmnd->result = scsi_result;
- cmnd->scsi_done(cmnd);
- /* As scsi_done() is called "inline" must return 0 */
- return 0;
- } else
+ "%s: max_queue=%d exceeded, %s\n",
+ __func__, scsi_debug_max_queue,
+ (scsi_result ? "status: TASK SET FULL" :
+ "report: host busy"));
+ if (scsi_result)
+ goto respond_in_thread;
+ else
return SCSI_MLQUEUE_HOST_BUSY;
}
__set_bit(k, queued_in_use_bm);
@@ -3117,12 +3115,18 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
else
tasklet_schedule(sqcp->tletp);
}
- if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts))
+ if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
+ (scsi_result == device_qfull_result))
sdev_printk(KERN_INFO, sdp,
"%s: num_in_q=%d +1, %s%s\n", __func__,
num_in_q, (inject ? "<inject> " : ""),
"status: TASK SET FULL");
return 0;
+
+respond_in_thread: /* call back to mid-layer using invocation thread */
+ cmnd->result = scsi_result;
+ cmnd->scsi_done(cmnd);
+ return 0;
}
/* Note: The following macros create attribute files in the
@@ -3206,7 +3210,7 @@ MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err...
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
-MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
+MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
@@ -4085,7 +4089,7 @@ static void sdebug_remove_adapter(void)
static int
scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
{
- unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
+ unsigned char *cmd = SCpnt->cmnd;
int len, k;
unsigned int num;
unsigned long long lba;
@@ -4103,7 +4107,7 @@ scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, 0);
if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
- !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) {
+ !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
char b[120];
int n;
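Illustrative aside, not part of the patch: the schedule_resp() rework earlier in this file changes how a saturated queue is reported. If the command already carries a non-zero result it is now completed inline with that result preserved; only otherwise does the driver substitute the TASK SET FULL status and defer. A simplified, lock-free restatement of that decision (the function name, types and string labels are assumptions of the sketch):

#include <stdio.h>

/*
 * Simplified decision: given queue occupancy and whether the command already
 * carries an error result, pick the path schedule_resp() would take. Locking,
 * timers and the every_nth injection case are deliberately omitted.
 */
static const char *schedule_decision(int num_in_q, int qdepth, int scsi_result)
{
	if (qdepth > 0 && num_in_q >= qdepth) {
		if (scsi_result)
			return "respond in thread, keep existing result";
		return "defer with TASK SET FULL";
	}
	return "defer normally";
}

int main(void)
{
	printf("%s\n", schedule_decision(3, 4, 0));	/* room left */
	printf("%s\n", schedule_decision(4, 4, 0));	/* queue full */
	printf("%s\n", schedule_decision(4, 4, 0x02));	/* keep CHECK CONDITION */
	return 0;
}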
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5db8454474ee..6b20ef3fee54 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1238,9 +1238,9 @@ retry_tur:
/**
* scsi_eh_test_devices - check if devices are responding from error recovery.
* @cmd_list: scsi commands in error recovery.
- * @work_q: queue for commands which still need more error recovery
- * @done_q: queue for commands which are finished
- * @try_stu: boolean on if a STU command should be tried in addition to TUR.
+ * @work_q: queue for commands which still need more error recovery
+ * @done_q: queue for commands which are finished
+ * @try_stu: boolean on if a STU command should be tried in addition to TUR.
*
* Description:
* Tests if devices are in a working state. Commands to devices now in
@@ -1373,7 +1373,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
/**
* scsi_eh_stu - send START_UNIT if needed
* @shost: &scsi host being recovered.
- * @work_q: &list_head for pending commands.
+ * @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
*
* Notes:
@@ -1436,7 +1436,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
/**
* scsi_eh_bus_device_reset - send bdr if needed
* @shost: scsi host being recovered.
- * @work_q: &list_head for pending commands.
+ * @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
*
* Notes:
@@ -1502,7 +1502,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
/**
* scsi_eh_target_reset - send target reset if needed
* @shost: scsi host being recovered.
- * @work_q: &list_head for pending commands.
+ * @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
*
* Notes:
@@ -1567,7 +1567,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
/**
* scsi_eh_bus_reset - send a bus reset
* @shost: &scsi host being recovered.
- * @work_q: &list_head for pending commands.
+ * @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
*/
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
@@ -1638,8 +1638,9 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
/**
* scsi_eh_host_reset - send a host reset
- * @work_q: list_head for processed commands.
- * @done_q: list_head for processed commands.
+ * @shost: host to be reset.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
*/
static int scsi_eh_host_reset(struct Scsi_Host *shost,
struct list_head *work_q,
@@ -1677,8 +1678,8 @@ static int scsi_eh_host_reset(struct Scsi_Host *shost,
/**
* scsi_eh_offline_sdevs - offline scsi devices that fail to recover
- * @work_q: list_head for processed commands.
- * @done_q: list_head for processed commands.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
*/
static void scsi_eh_offline_sdevs(struct list_head *work_q,
struct list_head *done_q)
@@ -2043,8 +2044,8 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
/**
* scsi_eh_ready_devs - check device ready state and recover if not.
- * @shost: host to be recovered.
- * @work_q: &list_head for pending commands.
+ * @shost: host to be recovered.
+ * @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
*/
void scsi_eh_ready_devs(struct Scsi_Host *shost,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index aaea4b98af16..db8c449282f9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -645,16 +645,18 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
unsigned long flags;
- BUG_ON(list_empty(&cmd->list));
-
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
+ if (shost->use_cmd_list) {
+ BUG_ON(list_empty(&cmd->list));
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
}
/*
@@ -1816,13 +1818,11 @@ static int scsi_mq_prep_fn(struct request *req)
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies;
- /*
- * XXX: cmd_list lookups are only used by two drivers, try to get
- * rid of this list in common code.
- */
- spin_lock_irq(&sdev->list_lock);
- list_add_tail(&cmd->list, &sdev->cmd_list);
- spin_unlock_irq(&sdev->list_lock);
+ if (shost->use_cmd_list) {
+ spin_lock_irq(&sdev->list_lock);
+ list_add_tail(&cmd->list, &sdev->cmd_list);
+ spin_unlock_irq(&sdev->list_lock);
+ }
sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
cmd->sdb.table.sgl = sg;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 56675dbbf681..ba3f1e8d0d57 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -736,6 +736,16 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->scsi_level++;
sdev->sdev_target->scsi_level = sdev->scsi_level;
+ /*
+ * If SCSI-2 or lower, and if the transport requires it,
+ * store the LUN value in CDB[1].
+ */
+ sdev->lun_in_cdb = 0;
+ if (sdev->scsi_level <= SCSI_2 &&
+ sdev->scsi_level != SCSI_UNKNOWN &&
+ !sdev->host->no_scsi2_lun_in_cdb)
+ sdev->lun_in_cdb = 1;
+
return 0;
}
@@ -805,6 +815,19 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
} else {
sdev->type = (inq_result[0] & 0x1f);
sdev->removable = (inq_result[1] & 0x80) >> 7;
+
+ /*
+ * Some devices may respond with the wrong type for
+ * well-known logical units. Force the well-known type
+ * so that they are enumerated correctly.
+ */
+ if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
+ __func__, sdev->type, (unsigned int)sdev->lun);
+ sdev->type = TYPE_WLUN;
+ }
+
}
if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
@@ -1733,6 +1756,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
/* target removed before the device could be added */
if (sdev->sdev_state == SDEV_DEL)
continue;
+ /* If device is already visible, skip adding it to sysfs */
+ if (sdev->is_visible)
+ continue;
if (!scsi_host_scan_allowed(shost) ||
scsi_sysfs_add_sdev(sdev) != 0)
__scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8b4105a22ac2..f4cb7b3e9e23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1044,10 +1044,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
pm_runtime_enable(&sdev->sdev_gendev);
scsi_autopm_put_target(starget);
- /* The following call will keep sdev active indefinitely, until
- * its driver does a corresponding scsi_autopm_pm_device(). Only
- * drivers supporting autosuspend will do this.
- */
scsi_autopm_get_device(sdev);
error = device_add(&sdev->sdev_gendev);
@@ -1085,6 +1081,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
}
}
+ scsi_autopm_put_device(sdev);
return error;
}
@@ -1263,7 +1260,19 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
sdev->sdev_dev.class = &sdev_class;
dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+ /*
+ * Get a default scsi_level from the target (derived from sibling
+ * devices). This is the best we can do for guessing how to set
+ * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the
+ * setting doesn't matter, because all the bits are zero anyway.
+ * But it does matter for higher LUNs.
+ */
sdev->scsi_level = starget->scsi_level;
+ if (sdev->scsi_level <= SCSI_2 &&
+ sdev->scsi_level != SCSI_UNKNOWN &&
+ !shost->no_scsi2_lun_in_cdb)
+ sdev->lun_in_cdb = 1;
+
transport_setup_device(&sdev->sdev_gendev);
spin_lock_irqsave(shost->host_lock, flags);
list_add_tail(&sdev->same_target_siblings, &starget->devices);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2c2041ca4b70..0cb5c9f0c743 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -185,7 +185,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
if (ct < 0)
return -EINVAL;
rcd = ct & 0x01 ? 1 : 0;
- wce = ct & 0x02 ? 1 : 0;
+ wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
if (sdkp->cache_override) {
sdkp->WCE = wce;
@@ -2490,6 +2490,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->DPOFUA = 0;
}
+ /* No cache flush allowed for write protected devices */
+ if (sdkp->WCE && sdkp->write_prot)
+ sdkp->WCE = 0;
+
if (sdkp->first_scan || old_wce != sdkp->WCE ||
old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
sd_printk(KERN_NOTICE, sdkp,
@@ -2961,6 +2965,7 @@ static int sd_probe(struct device *dev)
int index;
int error;
+ scsi_autopm_get_device(sdp);
error = -ENODEV;
if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
goto out;
@@ -3037,6 +3042,7 @@ static int sd_probe(struct device *dev)
out_free:
kfree(sdkp);
out:
+ scsi_autopm_put_device(sdp);
return error;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 7eeb93627beb..2de44cc58b1a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -657,6 +657,7 @@ static int sr_probe(struct device *dev)
struct scsi_cd *cd;
int minor, error;
+ scsi_autopm_get_device(sdev);
error = -ENODEV;
if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM)
goto fail;
@@ -744,6 +745,7 @@ fail_put:
fail_free:
kfree(cd);
fail:
+ scsi_autopm_put_device(sdev);
return error;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index aff9689de0f7..d3fd6e8fb378 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4105,6 +4105,7 @@ static int st_probe(struct device *dev)
return -ENODEV;
}
+ scsi_autopm_get_device(SDp);
i = queue_max_segments(SDp->request_queue);
if (st_max_sg_segs < i)
i = st_max_sg_segs;
@@ -4244,6 +4245,7 @@ out_put_disk:
out_buffer_free:
kfree(buffer);
out:
+ scsi_autopm_put_device(SDp);
return -ENODEV;
};
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fecac5d03fdd..733e5f759518 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1152,24 +1152,12 @@ static void storvsc_on_io_completion(struct hv_device *device,
stor_pkt->vm_srb.sense_info_length =
vstor_packet->vm_srb.sense_info_length;
- if (vstor_packet->vm_srb.scsi_status != 0 ||
- vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
- dev_warn(&device->device,
- "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
- stor_pkt->vm_srb.cdb[0],
- vstor_packet->vm_srb.scsi_status,
- vstor_packet->vm_srb.srb_status);
- }
if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
/* CHECK_CONDITION */
if (vstor_packet->vm_srb.srb_status &
SRB_STATUS_AUTOSENSE_VALID) {
/* autosense data available */
- dev_warn(&device->device,
- "stor pkt %p autosense data valid - len %d\n",
- request,
- vstor_packet->vm_srb.sense_info_length);
memcpy(request->sense_buffer,
vstor_packet->vm_srb.sense_data,
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index f07f90179bbc..6e07b2afddeb 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -35,6 +35,8 @@
config SCSI_UFSHCD
tristate "Universal Flash Storage Controller Driver Core"
depends on SCSI && SCSI_DMA
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
---help---
This selects the support for UFS devices in Linux, say Y and make
sure that you know the name of your UFS host adapter (the card
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index fafcf5e354c6..42c459a9d3fe 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -49,6 +49,27 @@
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
(byte1 << 8) | (byte0))
+/*
+ * A UFS device may have standard LUs whose LUN id can range from 0x00 to
+ * 0x7F. Standard LUs use the "Peripheral Device Addressing Format".
+ * A UFS device may also have Well Known LUs (also referred to as W-LUs),
+ * which again can range from 0x00 to 0x7F. For W-LUs, the device only uses
+ * the "Extended Addressing Format", which means the W-LUNs start
+ * from 0xc100 (SCSI_W_LUN_BASE).
+ * This means the maximum LUN number reported by a UFS device can be 0xC17F.
+ */
+#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
+#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
+#define UFS_UPIU_WLUN_ID (1 << 7)
+#define UFS_UPIU_MAX_GENERAL_LUN 8
+
+/* Well known logical unit id in LUN field of UPIU */
+enum {
+ UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
+ UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
+ UFS_UPIU_BOOT_WLUN = 0xB0,
+ UFS_UPIU_RPMB_WLUN = 0xC4,
+};
/*
* UFS Protocol Information Unit related definitions
@@ -108,11 +129,13 @@ enum {
/* Flag idn for Query Requests*/
enum flag_idn {
QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
QUERY_FLAG_IDN_BKOPS_EN = 0x04,
};
/* Attribute idn for Query requests */
enum attr_idn {
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
QUERY_ATTR_IDN_EE_STATUS = 0x0E,
@@ -129,10 +152,29 @@ enum desc_idn {
QUERY_DESC_IDN_RFU_1 = 0x6,
QUERY_DESC_IDN_GEOMETRY = 0x7,
QUERY_DESC_IDN_POWER = 0x8,
- QUERY_DESC_IDN_RFU_2 = 0x9,
+ QUERY_DESC_IDN_MAX,
+};
+
+enum desc_header_offset {
+ QUERY_DESC_LENGTH_OFFSET = 0x00,
+ QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+};
+
+enum ufs_desc_max_size {
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
+ QUERY_DESC_UNIT_MAX_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
+ /*
+ * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
+ * of descriptor header.
+ */
+ QUERY_DESC_STRING_MAX_SIZE = 0xFE,
+ QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
+ QUERY_DESC_POWER_MAX_SIZE = 0x62,
+ QUERY_DESC_RFU_MAX_SIZE = 0x00,
};
-#define UNIT_DESC_MAX_SIZE 0x22
/* Unit descriptor parameters offsets in bytes*/
enum unit_desc_param {
UNIT_DESC_PARAM_LEN = 0x0,
@@ -153,6 +195,43 @@ enum unit_desc_param {
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
};
+/*
+ * Logical Unit Write Protect
+ * 00h: LU not write protected
+ * 01h: LU write protected when fPowerOnWPEn =1
+ * 02h: LU permanently write protected when fPermanentWPEn =1
+ */
+enum ufs_lu_wp_type {
+ UFS_LU_NO_WP = 0x00,
+ UFS_LU_POWER_ON_WP = 0x01,
+ UFS_LU_PERM_WP = 0x02,
+};
+
+/* bActiveICCLevel parameter current units */
+enum {
+ UFSHCD_NANO_AMP = 0,
+ UFSHCD_MICRO_AMP = 1,
+ UFSHCD_MILI_AMP = 2,
+ UFSHCD_AMP = 3,
+};
+
+#define POWER_DESC_MAX_SIZE 0x62
+#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
+
+/* Attribute bActiveICCLevel parameter bit masks definitions */
+#define ATTR_ICC_LVL_UNIT_OFFSET 14
+#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
+#define ATTR_ICC_LVL_VALUE_MASK 0x3FF
+
+/* Power descriptor parameters offsets in bytes */
+enum power_desc_param_offset {
+ PWR_DESC_LEN = 0x0,
+ PWR_DESC_TYPE = 0x1,
+ PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2,
+ PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22,
+ PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42,
+};
+
/* Exception event mask values */
enum {
MASK_EE_STATUS = 0xFFFF,
@@ -160,11 +239,12 @@ enum {
};
/* Background operation status */
-enum {
+enum bkops_status {
BKOPS_STATUS_NO_OP = 0x0,
BKOPS_STATUS_NON_CRITICAL = 0x1,
BKOPS_STATUS_PERF_IMPACT = 0x2,
BKOPS_STATUS_CRITICAL = 0x3,
+ BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
};
/* UTP QUERY Transaction Specific Fields OpCode */
@@ -225,6 +305,14 @@ enum {
UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
};
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+ UFS_ACTIVE_PWR_MODE = 1,
+ UFS_SLEEP_PWR_MODE = 2,
+ UFS_POWERDOWN_PWR_MODE = 3,
+};
+
/**
* struct utp_upiu_header - UPIU header structure
* @dword_0: UPIU header DW-0
@@ -362,4 +450,42 @@ struct ufs_query_res {
struct utp_upiu_query upiu_res;
};
+#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */
+#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
+#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
+#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */
+#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
+
+/*
+ * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
+ * and link is in Hibern8 state.
+ */
+#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */
+
+struct ufs_vreg {
+ struct regulator *reg;
+ const char *name;
+ bool enabled;
+ int min_uV;
+ int max_uV;
+ int min_uA;
+ int max_uA;
+};
+
+struct ufs_vreg_info {
+ struct ufs_vreg *vcc;
+ struct ufs_vreg *vccq;
+ struct ufs_vreg *vccq2;
+ struct ufs_vreg *vdd_hba;
+};
+
+struct ufs_dev_info {
+ bool f_power_on_wp_en;
+ /* Keeps track of whether any of the LUs is power-on write protected */
+ bool is_lu_power_on_wp;
+};
+
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index afaabe2aeac8..955ed5587011 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -43,34 +43,24 @@
* @pdev: pointer to PCI device handle
* @state: power state
*
- * Returns -ENOSYS
+ * Returns 0 if successful
+ * Returns non-zero otherwise
*/
static int ufshcd_pci_suspend(struct device *dev)
{
- /*
- * TODO:
- * 1. Call ufshcd_suspend
- * 2. Do bus specific power management
- */
-
- return -ENOSYS;
+ return ufshcd_system_suspend(dev_get_drvdata(dev));
}
/**
* ufshcd_pci_resume - resume power management function
* @pdev: pointer to PCI device handle
*
- * Returns -ENOSYS
+ * Returns 0 if successful
+ * Returns non-zero otherwise
*/
static int ufshcd_pci_resume(struct device *dev)
{
- /*
- * TODO:
- * 1. Call ufshcd_resume.
- * 2. Do bus specific wake up
- */
-
- return -ENOSYS;
+ return ufshcd_system_resume(dev_get_drvdata(dev));
}
#else
#define ufshcd_pci_suspend NULL
@@ -80,30 +70,15 @@ static int ufshcd_pci_resume(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
static int ufshcd_pci_runtime_suspend(struct device *dev)
{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!hba)
- return 0;
-
- return ufshcd_runtime_suspend(hba);
+ return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}
static int ufshcd_pci_runtime_resume(struct device *dev)
{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!hba)
- return 0;
-
- return ufshcd_runtime_resume(hba);
+ return ufshcd_runtime_resume(dev_get_drvdata(dev));
}
static int ufshcd_pci_runtime_idle(struct device *dev)
{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!hba)
- return 0;
-
- return ufshcd_runtime_idle(hba);
+ return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
#else /* !CONFIG_PM_RUNTIME */
#define ufshcd_pci_runtime_suspend NULL
@@ -117,7 +92,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev)
*/
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
- ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+ ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}
/**
@@ -164,7 +139,15 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mmio_base = pcim_iomap_table(pdev)[0];
- err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+ err = ufshcd_alloc_host(&pdev->dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+ return err;
+ }
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+ err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
return err;
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 5e4623225422..8adf067ff019 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -35,53 +35,236 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include "ufshcd.h"
+static const struct of_device_id ufs_of_match[];
+static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev)
+{
+ if (dev->of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(ufs_of_match, dev->of_node);
+ if (match)
+ return (struct ufs_hba_variant_ops *)match->data;
+ }
+
+ return NULL;
+}
+
+static int ufshcd_parse_clock_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+ int cnt;
+ int i;
+ struct device *dev = hba->dev;
+ struct device_node *np = dev->of_node;
+ char *name;
+ u32 *clkfreq = NULL;
+ struct ufs_clk_info *clki;
+ int len = 0;
+ size_t sz = 0;
+
+ if (!np)
+ goto out;
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+ cnt = of_property_count_strings(np, "clock-names");
+ if (!cnt || (cnt == -EINVAL)) {
+ dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+ __func__);
+ } else if (cnt < 0) {
+ dev_err(dev, "%s: count clock strings failed, err %d\n",
+ __func__, cnt);
+ ret = cnt;
+ }
+
+ if (cnt <= 0)
+ goto out;
+
+ if (!of_get_property(np, "freq-table-hz", &len)) {
+ dev_info(dev, "freq-table-hz property not specified\n");
+ goto out;
+ }
+
+ if (len <= 0)
+ goto out;
+
+ sz = len / sizeof(*clkfreq);
+ if (sz != 2 * cnt) {
+ dev_err(dev, "%s len mismatch\n", "freq-table-hz");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
+ GFP_KERNEL);
+ if (!clkfreq) {
+ dev_err(dev, "%s: no memory\n", "freq-table-hz");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u32_array(np, "freq-table-hz",
+ clkfreq, sz);
+ if (ret && (ret != -EINVAL)) {
+ dev_err(dev, "%s: error reading array %d\n",
+ "freq-table-hz", ret);
+ goto free_clkfreq;
+ }
+
+ for (i = 0; i < sz; i += 2) {
+ ret = of_property_read_string_index(np,
+ "clock-names", i/2, (const char **)&name);
+ if (ret)
+ goto free_clkfreq;
+
+ clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+ if (!clki) {
+ ret = -ENOMEM;
+ goto free_clkfreq;
+ }
+
+ clki->min_freq = clkfreq[i];
+ clki->max_freq = clkfreq[i+1];
+ clki->name = kstrdup(name, GFP_KERNEL);
+ dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
+ clki->min_freq, clki->max_freq, clki->name);
+ list_add_tail(&clki->list, &hba->clk_list_head);
+ }
+free_clkfreq:
+ kfree(clkfreq);
+out:
+ return ret;
+}
+
+#define MAX_PROP_SIZE 32
+static int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg)
+{
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+ struct ufs_vreg *vreg = NULL;
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "%s: non DT initialization\n", __func__);
+ goto out;
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ goto out;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(dev, "No memory for %s regulator\n", name);
+ goto out;
+ }
+
+ vreg->name = kstrdup(name, GFP_KERNEL);
+
+ /* if it is a fixed regulator, no further initialization is needed */
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
+ if (of_property_read_bool(np, prop_name))
+ goto out;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+ ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+ if (ret) {
+ dev_err(dev, "%s: unable to find %s err %d\n",
+ __func__, prop_name, ret);
+ goto out_free;
+ }
+
+ vreg->min_uA = 0;
+ if (!strcmp(name, "vcc")) {
+ if (of_property_read_bool(np, "vcc-supply-1p8")) {
+ vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV;
+ } else {
+ vreg->min_uV = UFS_VREG_VCC_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCC_MAX_UV;
+ }
+ } else if (!strcmp(name, "vccq")) {
+ vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
+ } else if (!strcmp(name, "vccq2")) {
+ vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
+ }
+
+ goto out;
+
+out_free:
+ devm_kfree(dev, vreg);
+ vreg = NULL;
+out:
+ if (!ret)
+ *out_vreg = vreg;
+ return ret;
+}
+
+/**
+ * ufshcd_parse_regulator_info - get regulator info from device tree
+ * @hba: per adapter instance
+ *
+ * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
+ * If any of the supplies is not defined, it is assumed to be always-on and
+ * zero is returned. If a property is defined but parsing fails, the
+ * corresponding error is returned.
+ */
+static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
+{
+ int err;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
+out:
+ return err;
+}
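/*
 * A note on the bindings consumed above (property names are taken from the
 * code): for each supply "vdd-hba", "vcc", "vccq" and "vccq2" the parser
 * looks up "<name>-supply", optionally "<name>-fixed-regulator" and
 * "<name>-max-microamp" (plus "vcc-supply-1p8" for vcc), while the clock
 * parser pairs each "clock-names" entry with two u32s (min Hz, max Hz)
 * taken from "freq-table-hz".
 */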
+
#ifdef CONFIG_PM
/**
* ufshcd_pltfrm_suspend - suspend power management function
* @dev: pointer to device handle
*
- *
- * Returns 0
+ * Returns 0 if successful
+ * Returns non-zero otherwise
*/
static int ufshcd_pltfrm_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- /*
- * TODO:
- * 1. Call ufshcd_suspend
- * 2. Do bus specific power management
- */
-
- disable_irq(hba->irq);
-
- return 0;
+ return ufshcd_system_suspend(dev_get_drvdata(dev));
}
/**
* ufshcd_pltfrm_resume - resume power management function
* @dev: pointer to device handle
*
- * Returns 0
+ * Returns 0 if successful
+ * Returns non-zero otherwise
*/
static int ufshcd_pltfrm_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- /*
- * TODO:
- * 1. Call ufshcd_resume.
- * 2. Do bus specific wake up
- */
-
- enable_irq(hba->irq);
-
- return 0;
+ return ufshcd_system_resume(dev_get_drvdata(dev));
}
#else
#define ufshcd_pltfrm_suspend NULL
@@ -91,30 +274,15 @@ static int ufshcd_pltfrm_resume(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!hba)
- return 0;
-
- return ufshcd_runtime_suspend(hba);
+ return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}
static int ufshcd_pltfrm_runtime_resume(struct device *dev)
{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!hba)
- return 0;
-
- return ufshcd_runtime_resume(hba);
+ return ufshcd_runtime_resume(dev_get_drvdata(dev));
}
static int ufshcd_pltfrm_runtime_idle(struct device *dev)
{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!hba)
- return 0;
-
- return ufshcd_runtime_idle(hba);
+ return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
#else /* !CONFIG_PM_RUNTIME */
#define ufshcd_pltfrm_runtime_suspend NULL
@@ -122,6 +290,11 @@ static int ufshcd_pltfrm_runtime_idle(struct device *dev)
#define ufshcd_pltfrm_runtime_idle NULL
#endif /* CONFIG_PM_RUNTIME */
+static void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
+{
+ ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
+}
+
/**
* ufshcd_pltfrm_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
@@ -138,8 +311,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mmio_base = devm_ioremap_resource(dev, mem_res);
- if (IS_ERR(mmio_base)) {
- err = PTR_ERR(mmio_base);
+ if (IS_ERR(*(void **)&mmio_base)) {
+ err = PTR_ERR(*(void **)&mmio_base);
goto out;
}
@@ -150,10 +323,31 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
goto out;
}
+ err = ufshcd_alloc_host(dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+ goto out;
+ }
+
+ hba->vops = get_variant_ops(&pdev->dev);
+
+ err = ufshcd_parse_clock_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: clock parse failed %d\n",
+ __func__, err);
+ goto out;
+ }
+ err = ufshcd_parse_regulator_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: regulator init failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- err = ufshcd_init(dev, &hba, mmio_base, irq);
+ err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err(dev, "Intialization failed\n");
goto out_disable_rpm;
@@ -201,6 +395,7 @@ static const struct dev_pm_ops ufshcd_dev_pm_ops = {
static struct platform_driver ufshcd_pltfrm_driver = {
.probe = ufshcd_pltfrm_probe,
.remove = ufshcd_pltfrm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
.driver = {
.name = "ufshcd",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ba27215b8034..497c38a4a866 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,6 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.c
* Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -31,16 +32,19 @@
* circumstances will the contributor of this Program be liable for
* any damages of any kind arising from your use or distribution of
* this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
*/
#include <linux/async.h>
+#include <linux/devfreq.h>
#include "ufshcd.h"
#include "unipro.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
- UIC_POWER_MODE |\
UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
@@ -58,16 +62,44 @@
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
+#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
+ ({ \
+ int _ret; \
+ if (_on) \
+ _ret = ufshcd_enable_vreg(_dev, _vreg); \
+ else \
+ _ret = ufshcd_disable_vreg(_dev, _vreg); \
+ _ret; \
+ })
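/*
 * ufshcd_toggle_vreg() above is a statement expression that evaluates to the
 * enable/disable return code, so a caller can use it directly; a minimal
 * sketch (the surrounding code is hypothetical):
 *
 *	ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 *	if (ret)
 *		goto out;
 */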
+
+static u32 ufs_query_desc_max_size[] = {
+ QUERY_DESC_DEVICE_MAX_SIZE,
+ QUERY_DESC_CONFIGURAION_MAX_SIZE,
+ QUERY_DESC_UNIT_MAX_SIZE,
+ QUERY_DESC_RFU_MAX_SIZE,
+ QUERY_DESC_INTERCONNECT_MAX_SIZE,
+ QUERY_DESC_STRING_MAX_SIZE,
+ QUERY_DESC_RFU_MAX_SIZE,
+ QUERY_DESC_GEOMETRY_MAZ_SIZE,
+ QUERY_DESC_POWER_MAX_SIZE,
+ QUERY_DESC_RFU_MAX_SIZE,
+};
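/*
 * Note: ufs_query_desc_max_size[] is indexed by enum desc_idn, so the RFU
 * entries keep the array positions aligned with the descriptor IDN values
 * (e.g. index 0x8 is the power descriptor, matching QUERY_DESC_IDN_POWER).
 */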
+
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_MAX_LUNS = 8,
UFSHCD_CMD_PER_LUN = 32,
UFSHCD_CAN_QUEUE = 32,
};
@@ -106,12 +138,79 @@ enum {
#define ufshcd_clear_eh_in_progress(h) \
(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].link_state;
+}
+
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
- struct scsi_device *sdev);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode);
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->is_irq_enabled) {
+ ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+ hba);
+ if (ret)
+ dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+ __func__, ret);
+ hba->is_irq_enabled = true;
+ }
+
+ return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+ if (hba->is_irq_enabled) {
+ free_irq(hba->irq, hba);
+ hba->is_irq_enabled = false;
+ }
+}
/*
* ufshcd_wait_for_register - wait for register value to change
@@ -175,13 +274,14 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
/**
* ufshcd_is_device_present - Check if any device connected to
* the host controller
- * @reg_hcs - host controller status register value
+ * @hba: pointer to adapter instance
*
* Returns 1 if device present, 0 if no device detected
*/
-static inline int ufshcd_is_device_present(u32 reg_hcs)
+static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
- return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+ DEVICE_PRESENT) ? 1 : 0;
}
/**
@@ -413,6 +513,265 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.ungate_work);
+
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == CLKS_ON) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto unblock_reqs;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_setup_clocks(hba, true);
+
+ /* Exit from hibern8 */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ /* Prevent gating in this path */
+ hba->clk_gating.is_suspended = true;
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ else
+ ufshcd_set_link_active(hba);
+ }
+ hba->clk_gating.is_suspended = false;
+ }
+unblock_reqs:
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+ scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ goto out;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+
+start:
+ switch (hba->clk_gating.state) {
+ case CLKS_ON:
+ break;
+ case REQ_CLKS_OFF:
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ hba->clk_gating.state = CLKS_ON;
+ break;
+ }
+ /*
+ * If we are here, it means the gating work is either done or
+ * currently running. Hence, fall through to cancel gating
+ * work and to enable clocks.
+ */
+ case CLKS_OFF:
+ scsi_block_requests(hba->host);
+ hba->clk_gating.state = REQ_CLKS_ON;
+ schedule_work(&hba->clk_gating.ungate_work);
+ /*
+ * fall through to check if we should wait for this
+ * work to be done or not.
+ */
+ case REQ_CLKS_ON:
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ /* Make sure state is CLKS_ON before returning */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ default:
+ dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+ __func__, hba->clk_gating.state);
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return rc;
+}
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.gate_work.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.is_suspended) {
+ hba->clk_gating.state = CLKS_ON;
+ goto rel_lock;
+ }
+
+ if (hba->clk_gating.active_reqs
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ goto rel_lock;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* put the link into hibern8 mode before turning off clocks */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ if (ufshcd_uic_hibern8_enter(hba)) {
+ hba->clk_gating.state = CLKS_ON;
+ goto out;
+ }
+ ufshcd_set_link_hibern8(hba);
+ }
+
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ /*
+ * In case someone got here to cancel this work, the gating state would
+ * already be marked as REQ_CLKS_ON. In that case keep the state as
+ * REQ_CLKS_ON, which anyway implies that the clocks are off and a request
+ * to turn them on is pending. This keeps the state machine intact and
+ * ultimately prevents the cancel work from running multiple times when
+ * new requests arrive before the current cancel work is done.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == REQ_CLKS_OFF)
+ hba->clk_gating.state = CLKS_OFF;
+
+rel_lock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.active_reqs--;
+
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ return;
+
+ hba->clk_gating.state = REQ_CLKS_OFF;
+ schedule_delayed_work(&hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
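/*
 * Typical pairing of the gating helpers above, as used by the device
 * management paths later in this patch (a minimal sketch, not a new API):
 *
 *	ufshcd_hold(hba, false);	// ungate clocks, may block
 *	mutex_lock(&hba->dev_cmd.lock);
 *	// ... issue the query/device command ...
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);		// re-arm the delayed gate work
 */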
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.delay_ms = 150;
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+}
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+}
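/*
 * The "clkgate_delay_ms" attribute created above makes the idle delay
 * tunable at runtime, e.g. (the sysfs path is hypothetical and depends on
 * the host controller device name):
 *
 *	echo 200 > /sys/devices/<hba device>/clkgate_delay_ms
 */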
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return;
+
+ if (!hba->clk_scaling.is_busy_started) {
+ hba->clk_scaling.busy_start_t = ktime_get();
+ hba->clk_scaling.is_busy_started = true;
+ }
+}
+
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return;
+
+ if (!hba->outstanding_reqs && scaling->is_busy_started) {
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+ scaling->busy_start_t = ktime_set(0, 0);
+ scaling->is_busy_started = false;
+ }
+}
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
@@ -421,6 +780,7 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
+ ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
@@ -576,15 +936,12 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @uic_cmd: UIC command
*
* Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be called
- * with mutex held.
+ * with mutex held and host_lock locked.
* Returns 0 only if success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
- int ret;
- unsigned long flags;
-
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
"Controller not ready to accept UIC commands\n");
@@ -593,13 +950,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
init_completion(&uic_cmd->done);
- spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
- return ret;
+ return 0;
}
/**
@@ -613,11 +966,19 @@ static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
+ unsigned long flags;
+ ufshcd_hold(hba, false);
mutex_lock(&hba->uic_cmd_mutex);
+ spin_lock_irqsave(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
mutex_unlock(&hba->uic_cmd_mutex);
+ ufshcd_release(hba);
return ret;
}
@@ -867,6 +1228,32 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return ret;
}
+/*
+ * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
+ * @scsi_lun: scsi LUN id
+ *
+ * Returns UPIU LUN id
+ */
+static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
+{
+ if (scsi_is_wlun(scsi_lun))
+ return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
+ | UFS_UPIU_WLUN_ID;
+ else
+ return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @upiu_wlun_id: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+ return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
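/*
 * Example of the mapping above (assuming UFS_UPIU_WLUN_ID is BIT(7),
 * UFS_UPIU_MAX_UNIT_NUM_ID is 0x7f and SCSI_W_LUN_BASE is 0xc100, as
 * defined elsewhere in the tree): the REPORT LUNS well-known LU at SCSI
 * LUN 0xc101 becomes UPIU LUN (0x01 | UFS_UPIU_WLUN_ID) = 0x81, and the
 * reverse helper maps it back to 0xc101; a regular LU such as SCSI LUN 3
 * stays 3.
 */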
+
/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @cmd: command from SCSI Midlayer
@@ -918,6 +1305,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ err = ufshcd_hold(hba, true);
+ if (err) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -925,7 +1320,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
- lrbp->lun = cmd->device->lun;
+ lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = false;
lrbp->command_type = UTP_CMD_TYPE_SCSI;
@@ -1193,6 +1588,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
+ ufshcd_hold(hba, false);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -1236,6 +1632,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
return err;
}
@@ -1259,6 +1656,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
+ ufshcd_hold(hba, false);
if (!attr_val) {
dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
__func__, opcode);
@@ -1298,6 +1696,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
out:
+ ufshcd_release(hba);
return err;
}
@@ -1325,6 +1724,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
BUG_ON(!hba);
+ ufshcd_hold(hba, false);
if (!desc_buf) {
dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
__func__, opcode);
@@ -1374,10 +1774,120 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
out:
+ ufshcd_release(hba);
return err;
}
/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u32 param_offset,
+ u8 *param_read_buf,
+ u32 param_size)
+{
+ int ret;
+ u8 *desc_buf;
+ u32 buff_len;
+ bool is_kmalloc = true;
+
+ /* safety checks */
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ buff_len = ufs_query_desc_max_size[desc_id];
+ if ((param_offset + param_size) > buff_len)
+ return -EINVAL;
+
+ if (!param_offset && (param_size == buff_len)) {
+ /* memory space already available to hold full descriptor */
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
+ } else {
+ /* allocate memory to hold full descriptor */
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
+ }
+
+ ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, desc_buf,
+ &buff_len);
+
+ if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
+ (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
+ ufs_query_desc_max_size[desc_id])
+ || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
+ __func__, desc_id, param_offset, buff_len, ret);
+ if (!ret)
+ ret = -EINVAL;
+
+ goto out;
+ }
+
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+ if (is_kmalloc)
+ kfree(desc_buf);
+ return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+ int lun,
+ enum unit_desc_param param_offset,
+ u8 *param_read_buf,
+ u32 param_size)
+{
+ /*
+ * Unit descriptors are only available for general purpose LUs (LUN id
+ * from 0 to 7) and RPMB Well known LU.
+ */
+ if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+ return -EOPNOTSUPP;
+
+ return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+ param_offset, param_read_buf, param_size);
+}
+
+/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
*
@@ -1621,44 +2131,54 @@ out:
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
- * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
- * using DME_SET primitives.
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ *
* @hba: per adapter instance
- * @mode: powr mode value
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
+ * and device UniPro link, and hence their final completion is indicated by
+ * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
+ * addition to normal UIC command completion Status (UCCS). This function only
+ * returns after the relevant status bits indicate the completion.
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
- struct uic_command uic_cmd = {0};
- struct completion pwr_done;
+ struct completion uic_async_done;
unsigned long flags;
u8 status;
int ret;
- uic_cmd.command = UIC_CMD_DME_SET;
- uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
- uic_cmd.argument3 = mode;
- init_completion(&pwr_done);
-
mutex_lock(&hba->uic_cmd_mutex);
+ init_completion(&uic_async_done);
spin_lock_irqsave(hba->host->host_lock, flags);
- hba->pwr_done = &pwr_done;
+ hba->uic_async_done = &uic_async_done;
+ ret = __ufshcd_send_uic_cmd(hba, cmd);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret) {
dev_err(hba->dev,
- "pwr mode change with mode 0x%x uic error %d\n",
- mode, ret);
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+ goto out;
+ }
+ ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
goto out;
}
- if (!wait_for_completion_timeout(hba->pwr_done,
+ if (!wait_for_completion_timeout(hba->uic_async_done,
msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
dev_err(hba->dev,
- "pwr mode change with mode 0x%x completion timeout\n",
- mode);
+ "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+ cmd->command, cmd->argument3);
ret = -ETIMEDOUT;
goto out;
}
@@ -1666,53 +2186,144 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
- "pwr mode change failed, host umpcrs:0x%x\n",
- status);
+ "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
+ cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
out:
spin_lock_irqsave(hba->host->host_lock, flags);
- hba->pwr_done = NULL;
+ hba->uic_async_done = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
+
return ret;
}
/**
- * ufshcd_config_max_pwr_mode - Set & Change power mode with
- * maximum capability attribute information.
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
* @hba: per adapter instance
+ * @mode: power mode value
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
- enum {RX = 0, TX = 1};
- u32 lanes[] = {1, 1};
- u32 gear[] = {1, 1};
- u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+ struct uic_command uic_cmd = {0};
int ret;
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ ufshcd_hold(hba, false);
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ ufshcd_release(hba);
+
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+
+ return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+}
+
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ if (ret) {
+ ufshcd_set_link_off(hba);
+ ret = ufshcd_host_reset_and_restore(hba);
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+ if (hba->max_pwr_info.is_valid)
+ return 0;
+
+ pwr_info->pwr_tx = FASTAUTO_MODE;
+ pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->hs_rate = PA_HS_MODE_B;
+
/* Get the connected lane count */
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+ &pwr_info->lane_rx);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &pwr_info->lane_tx);
+
+ if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->lane_rx,
+ pwr_info->lane_tx);
+ return -EINVAL;
+ }
/*
* First, get the maximum gears of HS speed.
* If a zero value, it means there is no HSGEAR capability.
* Then, get the maximum gears of PWM speed.
*/
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
- if (!gear[RX]) {
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
- pwr[RX] = SLOWAUTO_MODE;
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+ __func__, pwr_info->gear_rx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_rx = SLOWAUTO_MODE;
}
- ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
- if (!gear[TX]) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
- &gear[TX]);
- pwr[TX] = SLOWAUTO_MODE;
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+ __func__, pwr_info->gear_tx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_tx = SLOWAUTO_MODE;
+ }
+
+ hba->max_pwr_info.is_valid = true;
+ return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ int ret;
+
+ /* if already configured to the requested pwr_mode */
+ if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+ pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+ pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+ pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+ pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+ dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+ return 0;
}
/*
@@ -1721,23 +2332,67 @@ static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
* - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
* - PA_HSSERIES
*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
- if (pwr[RX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ pwr_mode->lane_rx);
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
- if (pwr[TX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ pwr_mode->lane_tx);
+ if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
- if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ pwr_mode->hs_rate);
- ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
- if (ret)
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+ | pwr_mode->pwr_tx);
+
+ if (ret) {
dev_err(hba->dev,
- "pwr_mode: power mode change failed %d\n", ret);
+ "%s: power mode change failed %d\n", __func__, ret);
+ } else {
+ if (hba->vops && hba->vops->pwr_change_notify)
+ hba->vops->pwr_change_notify(hba,
+ POST_CHANGE, NULL, pwr_mode);
+
+ memcpy(&hba->pwr_info, pwr_mode,
+ sizeof(struct ufs_pa_layer_attr));
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+ struct ufs_pa_layer_attr final_params = { 0 };
+ int ret;
+
+ if (hba->vops && hba->vops->pwr_change_notify)
+ hba->vops->pwr_change_notify(hba,
+ PRE_CHANGE, desired_pwr_mode, &final_params);
+ else
+ memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+ ret = ufshcd_change_power_mode(hba, &final_params);
return ret;
}
@@ -1798,11 +2453,10 @@ out:
* @hba: per adapter instance
*
* To bring UFS host controller to operational state,
- * 1. Check if device is present
- * 2. Enable required interrupts
- * 3. Configure interrupt aggregation
- * 4. Program UTRL and UTMRL base addres
- * 5. Configure run-stop-registers
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base address
+ * 4. Configure run-stop-registers
*
* Returns 0 on success, non-zero value on failure
*/
@@ -1811,14 +2465,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
int err = 0;
u32 reg;
- /* check if device present */
- reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
- if (!ufshcd_is_device_present(reg)) {
- dev_err(hba->dev, "cc: Device not present\n");
- err = -ENXIO;
- goto out;
- }
-
/* Enable required interrupts */
ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
@@ -1839,6 +2485,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
* DEI, HEI bits must be 0
*/
+ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
if (!(ufshcd_get_lists_status(reg))) {
ufshcd_enable_run_stop_reg(hba);
} else {
@@ -1885,6 +2532,12 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
msleep(5);
}
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
+
+ if (hba->vops && hba->vops->hce_enable_notify)
+ hba->vops->hce_enable_notify(hba, PRE_CHANGE);
+
/* start controller initialization sequence */
ufshcd_hba_start(hba);
@@ -1912,6 +2565,13 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
}
msleep(5);
}
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+ if (hba->vops && hba->vops->hce_enable_notify)
+ hba->vops->hce_enable_notify(hba, POST_CHANGE);
+
return 0;
}
@@ -1924,16 +2584,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
+ int retries = DME_LINKSTARTUP_RETRIES;
- /* enable UIC related interrupts */
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ do {
+ if (hba->vops && hba->vops->link_startup_notify)
+ hba->vops->link_startup_notify(hba, PRE_CHANGE);
+
+ ret = ufshcd_dme_link_startup(hba);
+
+ /* check if device is detected by inter-connect layer */
+ if (!ret && !ufshcd_is_device_present(hba)) {
+ dev_err(hba->dev, "%s: Device not present\n", __func__);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * DME link lost indication is only received when link is up,
+ * but we can't be sure if the link is up until link startup
+ * succeeds. So reset the local Uni-Pro and try again.
+ */
+ if (ret && ufshcd_hba_enable(hba))
+ goto out;
+ } while (ret && retries--);
- ret = ufshcd_dme_link_startup(hba);
if (ret)
+ /* failed to get the link up... retire */
goto out;
- ret = ufshcd_make_hba_operational(hba);
+ /* Include any host controller configuration via UIC commands */
+ if (hba->vops && hba->vops->link_startup_notify) {
+ ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
+ if (ret)
+ goto out;
+ }
+ ret = ufshcd_make_hba_operational(hba);
out:
if (ret)
dev_err(hba->dev, "link startup failed %d\n", ret);
@@ -1955,6 +2641,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
+ ufshcd_hold(hba, false);
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -1966,6 +2653,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -1973,6 +2661,100 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
}
/**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
+ * value that host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+ int ret = 0;
+ u8 lun_qdepth;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ lun_qdepth = hba->nutrs;
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+
+ /* Some W-LUNs don't support unit descriptors */
+ if (ret == -EOPNOTSUPP)
+ lun_qdepth = 1;
+ else if (!lun_qdepth)
+ /* eventually, we can figure out the real queue depth */
+ lun_qdepth = hba->nutrs;
+ else
+ lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+ dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+ __func__, lun_qdepth);
+ scsi_activate_tcq(sdev, lun_qdepth);
+}
+
+/*
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success, with the write protect status returned in
+ * the @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+ u8 lun,
+ u8 *b_lu_write_protect)
+{
+ int ret;
+
+ if (!b_lu_write_protect)
+ ret = -EINVAL;
+ /*
+ * According to UFS device spec, RPMB LU can't be write
+ * protected so skip reading bLUWriteProtect parameter for
+ * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+ */
+ else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ ret = -ENOTSUPP;
+ else
+ ret = ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_LU_WR_PROTECT,
+ b_lu_write_protect,
+ sizeof(*b_lu_write_protect));
+ return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+ struct scsi_device *sdev)
+{
+ if (hba->dev_info.f_power_on_wp_en &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ u8 b_lu_write_protect;
+
+ if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+ &b_lu_write_protect) &&
+ (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+ hba->dev_info.is_lu_power_on_wp = true;
+ }
+}
+
+/**
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
@@ -1981,7 +2763,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
struct ufs_hba *hba;
- int lun_qdepth;
hba = shost_priv(sdev->host);
sdev->tagged_supported = 1;
@@ -1996,16 +2777,10 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
/* REPORT SUPPORTED OPERATION CODES is not supported */
sdev->no_report_opcodes = 1;
- lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev);
- if (lun_qdepth <= 0)
- /* eventually, we can figure out the real queue depth */
- lun_qdepth = hba->nutrs;
- else
- lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
- dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
- __func__, lun_qdepth);
- scsi_activate_tcq(sdev, lun_qdepth);
+ ufshcd_set_queue_depth(sdev);
+
+ ufshcd_get_lu_power_on_wp_status(hba, sdev);
return 0;
}
@@ -2068,6 +2843,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
hba = shost_priv(sdev->host);
scsi_deactivate_tcq(sdev, hba->nutrs);
+ /* Drop the reference as it won't be needed anymore */
+ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+ hba->sdev_ufs_device = NULL;
}
/**
@@ -2234,8 +3012,8 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
complete(&hba->active_uic_cmd->done);
}
- if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
- complete(hba->pwr_done);
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ complete(hba->uic_async_done);
}
/**
@@ -2275,6 +3053,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
clear_bit_unlock(index, &hba->lrb_in_use);
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
+ __ufshcd_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
if (hba->dev_cmd.complete)
complete(hba->dev_cmd.complete);
@@ -2284,6 +3063,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
+ ufshcd_clk_scaling_update_busy(hba);
+
/* we might have free'd some tags above */
wake_up(&hba->dev_cmd.tag_wq);
}
@@ -2447,33 +3228,62 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
}
/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
* @hba: per-adapter instance
+ * @status: bkops_status value
*
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
+ * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to the "status" argument passed to
+ * this function, and disable it otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
*/
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+ enum bkops_status status)
{
int err;
- u32 status = 0;
+ u32 curr_status = 0;
- err = ufshcd_get_bkops_status(hba, &status);
+ err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
__func__, err);
goto out;
+ } else if (curr_status > BKOPS_STATUS_MAX) {
+ dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+ __func__, curr_status);
+ err = -EINVAL;
+ goto out;
}
- status = status & 0xF;
-
- /* handle only if status indicates performance impact or critical */
- if (status >= BKOPS_STATUS_PERF_IMPACT)
+ if (curr_status >= status)
err = ufshcd_enable_auto_bkops(hba);
+ else
+ err = ufshcd_disable_auto_bkops(hba);
out:
return err;
}
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is
+ * not enabled, and a negative error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+}
+
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -2505,7 +3315,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
status &= hba->ee_ctrl_mask;
if (status & MASK_EE_URGENT_BKOPS) {
err = ufshcd_urgent_bkops(hba);
- if (err)
+ if (err < 0)
dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
__func__, err);
}
@@ -2530,6 +3340,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
@@ -2583,6 +3394,7 @@ static void ufshcd_err_handler(struct work_struct *work)
out:
scsi_unblock_requests(hba->host);
+ ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
}
@@ -2766,6 +3578,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
task_req_descp = hba->utmrdl_base_addr;
@@ -2785,7 +3598,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
lun_id, task_tag);
task_req_upiup->header.dword_1 =
UPIU_HEADER_DWORD(0, tm_function, 0, 0);
-
+ /*
+ * The host shall provide the same value for LUN field in the basic
+ * header and for Input Parameter.
+ */
task_req_upiup->input_param1 = cpu_to_be32(lun_id);
task_req_upiup->input_param2 = cpu_to_be32(task_id);
@@ -2814,6 +3630,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
ufshcd_put_tm_slot(hba, free_slot);
wake_up(&hba->tm_tag_wq);
+ ufshcd_release(hba);
return err;
}
@@ -2896,6 +3713,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
hba = shost_priv(host);
tag = cmd->request->tag;
+ ufshcd_hold(hba, false);
/* If command is already aborted/completed, return SUCCESS */
if (!(test_bit(tag, &hba->outstanding_reqs)))
goto out;
@@ -2960,6 +3778,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
clear_bit_unlock(tag, &hba->lrb_in_use);
wake_up(&hba->dev_cmd.tag_wq);
+
out:
if (!err) {
err = SUCCESS;
@@ -2968,6 +3787,11 @@ out:
err = FAILED;
}
+ /*
+ * This ufshcd_release() corresponds to the original scsi cmd that got
+ * aborted here (as we won't get any IRQ for it).
+ */
+ ufshcd_release(hba);
return err;
}
@@ -2984,7 +3808,6 @@ out:
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
int err;
- async_cookie_t cookie;
unsigned long flags;
/* Reset the host controller */
@@ -2997,10 +3820,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
goto out;
/* Establish the link again and restore the device */
- cookie = async_schedule(ufshcd_async_scan, hba);
- /* wait for async scan to be completed */
- async_synchronize_cookie(++cookie);
- if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ err = ufshcd_probe_hba(hba);
+
+ if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
err = -EIO;
out:
if (err)
@@ -3022,8 +3844,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
int err = 0;
unsigned long flags;
+ int retries = MAX_HOST_RESET_RETRIES;
- err = ufshcd_host_reset_and_restore(hba);
+ do {
+ err = ufshcd_host_reset_and_restore(hba);
+ } while (err && --retries);
/*
* After reset the door-bell might be cleared, complete
@@ -3051,6 +3876,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba = shost_priv(cmd->device->host);
+ ufshcd_hold(hba, false);
/*
* Check if there is any race with fatal error handling.
* If so, wait for it to complete. Even though fatal error
@@ -3084,56 +3910,232 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
return err;
}
/**
- * ufshcd_read_sdev_qdepth - read the lun command queue depth
- * @hba: Pointer to adapter instance
- * @sdev: pointer to SCSI device
+ * ufshcd_get_max_icc_level - calculate the ICC level
+ * @sup_curr_uA: max. current supported by the regulator
+ * @start_scan: row in the descriptor table to start the scan from
+ * @buff: power descriptor buffer
*
- * Return in case of success the lun's queue depth else error.
+ * Returns calculated max ICC level for specific regulator
*/
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
- struct scsi_device *sdev)
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+{
+ int i;
+ int curr_uA;
+ u16 data;
+ u16 unit;
+
+ for (i = start_scan; i >= 0; i--) {
+ data = be16_to_cpu(*((u16 *)(buff + 2*i)));
+ unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
+ ATTR_ICC_LVL_UNIT_OFFSET;
+ curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
+ switch (unit) {
+ case UFSHCD_NANO_AMP:
+ curr_uA = curr_uA / 1000;
+ break;
+ case UFSHCD_MILI_AMP:
+ curr_uA = curr_uA * 1000;
+ break;
+ case UFSHCD_AMP:
+ curr_uA = curr_uA * 1000 * 1000;
+ break;
+ case UFSHCD_MICRO_AMP:
+ default:
+ break;
+ }
+ if (sup_curr_uA >= curr_uA)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
+ }
+
+ return (u32)i;
+}
+
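For reference, the per-row decode that ufshcd_get_max_icc_level() performs above can be sketched in isolation. This is an illustrative helper only, not part of the patch; it reuses the driver's ATTR_ICC_LVL_* masks and UFSHCD_*_AMP unit codes (defined elsewhere in the driver), and the helper name itself is made up.

/* Illustrative only: decode one 16-bit power descriptor row into microamps,
 * mirroring the unit/value split used by ufshcd_get_max_icc_level(). */
static int ufs_example_icc_row_to_uA(char *buff, u32 row)
{
	u16 data = be16_to_cpu(*((u16 *)(buff + 2 * row)));
	u16 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET;
	int uA = data & ATTR_ICC_LVL_VALUE_MASK;

	switch (unit) {
	case UFSHCD_NANO_AMP:
		return uA / 1000;
	case UFSHCD_MILI_AMP:
		return uA * 1000;
	case UFSHCD_AMP:
		return uA * 1000 * 1000;
	case UFSHCD_MICRO_AMP:
	default:
		return uA;
	}
}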
+/**
+ * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
+ * ICC level
+ * @hba: per-adapter instance
+ * @desc_buf: power descriptor buffer to extract ICC levels from
+ * @len: length of desc_buf
+ *
+ * Returns the calculated ICC level; returns 0 if the regulators are not
+ * initialized.
+ */
+static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+ u8 *desc_buf, int len)
+{
+ u32 icc_level = 0;
+
+ if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
+ !hba->vreg_info.vccq2) {
+ dev_err(hba->dev,
+ "%s: Regulator capability was not set, actvIccLevel=%d",
+ __func__, icc_level);
+ goto out;
+ }
+
+ if (hba->vreg_info.vcc)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vcc->max_uA,
+ POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+ if (hba->vreg_info.vccq)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+ if (hba->vreg_info.vccq2)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq2->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
+out:
+ return icc_level;
+}
+
+static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
- int buff_len = UNIT_DESC_MAX_SIZE;
- u8 desc_buf[UNIT_DESC_MAX_SIZE];
+ int buff_len = QUERY_DESC_POWER_MAX_SIZE;
+ u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
- ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
- QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len);
+ ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: Failed reading power descriptor.len = %d ret = %d",
+ __func__, buff_len, ret);
+ return;
+ }
+
+ hba->init_prefetch_data.icc_level =
+ ufshcd_find_max_sup_active_icc_level(hba,
+ desc_buf, buff_len);
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
+ __func__, hba->init_prefetch_data.icc_level);
- if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) {
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
+
+ if (ret)
dev_err(hba->dev,
- "%s:Failed reading unit descriptor. len = %d ret = %d"
- , __func__, buff_len, ret);
- if (!ret)
- ret = -EINVAL;
+ "%s: Failed configuring bActiveICCLevel = %d ret = %d",
+ __func__, hba->init_prefetch_data.icc_level, ret);
+}
+
+/**
+ * ufshcd_scsi_add_wlus - Adds required W-LUs
+ * @hba: per-adapter instance
+ *
+ * UFS device specification requires the UFS devices to support 4 well known
+ * logical units:
+ * "REPORT_LUNS" (address: 01h)
+ * "UFS Device" (address: 50h)
+ * "RPMB" (address: 44h)
+ * "BOOT" (address: 30h)
+ * UFS device's power management needs to be controlled by "POWER CONDITION"
+ * field of SSU (START STOP UNIT) command. But this "power condition" field
+ * will take effect only when it is sent to the "UFS device" well known
+ * logical unit, hence we require the scsi_device instance to represent this
+ * logical unit in order for the UFS host driver to send the SSU command for
+ * power management.
+ *
+ * We also require the scsi_device instance for the "RPMB" (Replay Protected
+ * Memory Block) LU so that user space processes can control this LU. User
+ * space may also want access to the BOOT LU.
+ *
+ * This function adds scsi device instances for each of the well known LUs
+ * (except the "REPORT LUNS" LU).
+ *
+ * Returns zero on success (all required W-LUs are added successfully),
+ * non-zero error value on failure (if failed to add any of the required W-LU).
+ */
+static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
+ if (IS_ERR(hba->sdev_ufs_device)) {
+ ret = PTR_ERR(hba->sdev_ufs_device);
+ hba->sdev_ufs_device = NULL;
goto out;
}
- ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF;
+ hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+ if (IS_ERR(hba->sdev_boot)) {
+ ret = PTR_ERR(hba->sdev_boot);
+ hba->sdev_boot = NULL;
+ goto remove_sdev_ufs_device;
+ }
+
+ hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
+ if (IS_ERR(hba->sdev_rpmb)) {
+ ret = PTR_ERR(hba->sdev_rpmb);
+ hba->sdev_rpmb = NULL;
+ goto remove_sdev_boot;
+ }
+ goto out;
+
+remove_sdev_boot:
+ scsi_remove_device(hba->sdev_boot);
+remove_sdev_ufs_device:
+ scsi_remove_device(hba->sdev_ufs_device);
out:
return ret;
}
/**
- * ufshcd_async_scan - asynchronous execution for link startup
- * @data: data pointer to pass to this function
- * @cookie: cookie data
+ * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
+ * ufshcd_scsi_add_wlus()
+ * @hba: per-adapter instance
+ *
*/
-static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
+{
+ if (hba->sdev_ufs_device) {
+ scsi_remove_device(hba->sdev_ufs_device);
+ hba->sdev_ufs_device = NULL;
+ }
+
+ if (hba->sdev_boot) {
+ scsi_remove_device(hba->sdev_boot);
+ hba->sdev_boot = NULL;
+ }
+
+ if (hba->sdev_rpmb) {
+ scsi_remove_device(hba->sdev_rpmb);
+ hba->sdev_rpmb = NULL;
+ }
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba)
{
- struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
ret = ufshcd_link_startup(hba);
if (ret)
goto out;
- ufshcd_config_max_pwr_mode(hba);
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
ret = ufshcd_verify_dev_init(hba);
if (ret)
@@ -3143,16 +4145,77 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
if (ret)
goto out;
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ hba->wlun_dev_clr_ua = true;
+
+ if (ufshcd_get_max_pwr_mode(hba)) {
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+ } else {
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret)
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ }
+
+ /*
+ * If we are in error handling context or in power management callbacks
+ * context, no need to scan the host
+ */
+ if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ bool flag;
+
+ /* clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+ if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+ if (!hba->is_init_prefetch)
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ if (ufshcd_scsi_add_wlus(hba))
+ goto out;
- /* If we are in error handling context no need to scan the host */
- if (!ufshcd_eh_in_progress(hba)) {
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
}
+
+ if (!hba->is_init_prefetch)
+ hba->is_init_prefetch = true;
+
+ /* Resume devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+
out:
- return;
+ /*
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
+ */
+ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_hba_exit(hba);
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_async_scan - asynchronous execution for probing hba
+ * @data: data pointer to pass to this function
+ * @cookie: cookie data
+ */
+static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)data;
+
+ ufshcd_probe_hba(hba);
}
static struct scsi_host_template ufshcd_driver_template = {
@@ -3171,70 +4234,956 @@ static struct scsi_host_template ufshcd_driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
+ .max_host_blocked = 1,
};
+static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+ int ua)
+{
+ int ret = 0;
+ struct regulator *reg = vreg->reg;
+ const char *name = vreg->name;
+
+ BUG_ON(!vreg);
+
+ ret = regulator_set_optimum_mode(reg, ua);
+ if (ret >= 0) {
+ /*
+ * regulator_set_optimum_mode() returns new regulator
+ * mode upon success.
+ */
+ ret = 0;
+ } else {
+ dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n",
+ __func__, name, ua, ret);
+ }
+
+ return ret;
+}
+
+static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+}
+
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+}
+
+static int ufshcd_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+{
+ int ret = 0;
+ struct regulator *reg = vreg->reg;
+ const char *name = vreg->name;
+ int min_uV, uA_load;
+
+ BUG_ON(!vreg);
+
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg || vreg->enabled)
+ goto out;
+
+ ret = ufshcd_config_vreg(dev, vreg, true);
+ if (!ret)
+ ret = regulator_enable(vreg->reg);
+
+ if (!ret)
+ vreg->enabled = true;
+ else
+ dev_err(dev, "%s: %s enable failed, err=%d\n",
+ __func__, vreg->name, ret);
+out:
+ return ret;
+}
+
+static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg || !vreg->enabled)
+ goto out;
+
+ ret = regulator_disable(vreg->reg);
+
+ if (!ret) {
+ /* ignore errors on applying disable config */
+ ufshcd_config_vreg(dev, vreg, false);
+ vreg->enabled = false;
+ } else {
+ dev_err(dev, "%s: %s disable failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vcc, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
+ if (ret)
+ goto out;
+
+out:
+ if (ret) {
+ ufshcd_toggle_vreg(dev, info->vccq2, false);
+ ufshcd_toggle_vreg(dev, info->vccq, false);
+ ufshcd_toggle_vreg(dev, info->vcc, false);
+ }
+ return ret;
+}
+
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+
+ return 0;
+}
+
+static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_init_vreg(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vcc);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq2);
+out:
+ return ret;
+}
+
+static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_get_vreg(hba->dev, info->vdd_hba);
+
+ return 0;
+}
+
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ unsigned long flags;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+ continue;
+
+ if (on && !clki->enabled) {
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+ } else if (!on && clki->enabled) {
+ clk_disable_unprepare(clki->clk);
+ }
+ clki->enabled = on;
+ dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
+ clki->name, on ? "en" : "dis");
+ }
+ }
+
+ if (hba->vops && hba->vops->setup_clocks)
+ ret = hba->vops->setup_clocks(hba, on);
+out:
+ if (ret) {
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+ clk_disable_unprepare(clki->clk);
+ }
+ } else if (!ret && on) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.state = CLKS_ON;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+{
+ return __ufshcd_setup_clocks(hba, on, false);
+}
+
+static int ufshcd_init_clocks(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct device *dev = hba->dev;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ clki->clk = devm_clk_get(dev, clki->name);
+ if (IS_ERR(clki->clk)) {
+ ret = PTR_ERR(clki->clk);
+ dev_err(dev, "%s: %s clk get failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+
+ if (clki->max_freq) {
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ goto out;
+ }
+ clki->curr_freq = clki->max_freq;
+ }
+ dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_variant_hba_init(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->vops)
+ goto out;
+
+ if (hba->vops->init) {
+ err = hba->vops->init(hba);
+ if (err)
+ goto out;
+ }
+
+ if (hba->vops->setup_regulators) {
+ err = hba->vops->setup_regulators(hba, true);
+ if (err)
+ goto out_exit;
+ }
+
+ goto out;
+
+out_exit:
+ if (hba->vops->exit)
+ hba->vops->exit(hba);
+out:
+ if (err)
+ dev_err(hba->dev, "%s: variant %s init failed err %d\n",
+ __func__, hba->vops ? hba->vops->name : "", err);
+ return err;
+}
+
+static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
+{
+ if (!hba->vops)
+ return;
+
+ if (hba->vops->setup_clocks)
+ hba->vops->setup_clocks(hba, false);
+
+ if (hba->vops->setup_regulators)
+ hba->vops->setup_regulators(hba, false);
+
+ if (hba->vops->exit)
+ hba->vops->exit(hba);
+}
+
+static int ufshcd_hba_init(struct ufs_hba *hba)
+{
+ int err;
+
+ /*
+ * Handle host controller power separately from the UFS device power
+ * rails, as that makes it easier to control the UFS host controller power
+ * collapse, which is different from UFS device power collapse.
+ * Also, enable the host controller power before we go ahead with the rest
+ * of the initialization here.
+ */
+ err = ufshcd_init_hba_vreg(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_setup_hba_vreg(hba, true);
+ if (err)
+ goto out;
+
+ err = ufshcd_init_clocks(hba);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_setup_clocks(hba, true);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_init_vreg(hba);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_setup_vreg(hba, true);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_variant_hba_init(hba);
+ if (err)
+ goto out_disable_vreg;
+
+ hba->is_powered = true;
+ goto out;
+
+out_disable_vreg:
+ ufshcd_setup_vreg(hba, false);
+out_disable_clks:
+ ufshcd_setup_clocks(hba, false);
+out_disable_hba_vreg:
+ ufshcd_setup_hba_vreg(hba, false);
+out:
+ return err;
+}
+
+static void ufshcd_hba_exit(struct ufs_hba *hba)
+{
+ if (hba->is_powered) {
+ ufshcd_variant_hba_exit(hba);
+ ufshcd_setup_vreg(hba, false);
+ ufshcd_setup_clocks(hba, false);
+ ufshcd_setup_hba_vreg(hba, false);
+ hba->is_powered = false;
+ }
+}
+
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
+{
+ unsigned char cmd[6] = {REQUEST_SENSE,
+ 0,
+ 0,
+ 0,
+ SCSI_SENSE_BUFFERSIZE,
+ 0};
+ char *buffer;
+ int ret;
+
+ buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
+ SCSI_SENSE_BUFFERSIZE, NULL,
+ msecs_to_jiffies(1000), 3, NULL, REQ_PM);
+ if (ret)
+ pr_err("%s: failed with err %d\n", __func__, ret);
+
+ kfree(buffer);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
+ * power mode
+ * @hba: per adapter instance
+ * @pwr_mode: device power mode to set
+ *
+ * Returns 0 if requested power mode is set successfully
+ * Returns non-zero if failed to set the requested power mode
+ */
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ enum ufs_dev_pwr_mode pwr_mode)
+{
+ unsigned char cmd[6] = { START_STOP };
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdp = hba->sdev_ufs_device;
+ int ret;
+
+ if (!sdp || !scsi_device_online(sdp))
+ return -ENODEV;
+
+ /*
+ * If scsi commands fail, the scsi mid-layer schedules scsi error-
+ * handling, which would wait for host to be resumed. Since we know
+ * we are functional while we are here, skip host resume in error
+ * handling context.
+ */
+ hba->host->eh_noresume = 1;
+ if (hba->wlun_dev_clr_ua) {
+ ret = ufshcd_send_request_sense(hba, sdp);
+ if (ret)
+ goto out;
+ /* Unit attention condition is cleared now */
+ hba->wlun_dev_clr_ua = false;
+ }
+
+ cmd[4] = pwr_mode << 4;
+
+ /*
+ * Current function would be generally called from the power management
+ * callbacks hence set the REQ_PM flag so that it doesn't resume the
+ * already suspended childs.
+ */
+ ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+ START_STOP_TIMEOUT, 0, NULL, REQ_PM);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sdp,
+ "START_STOP failed for power mode: %d\n", pwr_mode);
+ scsi_show_result(ret);
+ if (driver_byte(ret) & DRIVER_SENSE) {
+ scsi_show_sense_hdr(&sshdr);
+ scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
+ }
+ }
+
+ if (!ret)
+ hba->curr_dev_pwr_mode = pwr_mode;
+out:
+ hba->host->eh_noresume = 0;
+ return ret;
+}
+
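The CDB built in ufshcd_set_dev_pwr_mode() above relies on the SBC START STOP UNIT layout, where the POWER CONDITION field occupies bits 7:4 of byte 4. A standalone sketch of the same construction follows; it is illustrative only, and assumes the ufs_dev_pwr_mode enum values line up with the UFS power conditions as the driver expects.

/* Illustration only: build the 6-byte START STOP UNIT CDB the same way
 * ufshcd_set_dev_pwr_mode() does. START_STOP is the standard 0x1B opcode. */
static void ufs_example_build_ssu_cdb(unsigned char cdb[6],
				      enum ufs_dev_pwr_mode pwr_mode)
{
	memset(cdb, 0, 6);
	cdb[0] = START_STOP;
	/* POWER CONDITION lives in bits 7:4 of byte 4 */
	cdb[4] = pwr_mode << 4;
}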
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+ enum uic_link_state req_link_state,
+ int check_for_bkops)
+{
+ int ret = 0;
+
+ if (req_link_state == hba->uic_link_state)
+ return 0;
+
+ if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (!ret)
+ ufshcd_set_link_hibern8(hba);
+ else
+ goto out;
+ }
+ /*
+ * If autobkops is enabled, link can't be turned off because
+ * turning off the link would also turn off the device.
+ */
+ else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+ (!check_for_bkops || (check_for_bkops &&
+ !hba->auto_bkops_enabled))) {
+ /*
+ * Change controller state to "reset state" which
+ * should also put the link in off/reset state
+ */
+ ufshcd_hba_stop(hba);
+ /*
+ * TODO: Check if we need any delay to make sure that
+ * controller is reset
+ */
+ ufshcd_set_link_off(hba);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+ /*
+ * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
+ * save some power.
+ *
+ * If UFS device and link is in OFF state, all power supplies (VCC,
+ * VCCQ, VCCQ2) can be turned off if power on write protect is not
+ * required. If UFS link is inactive (Hibern8 or OFF state) and device
+ * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
+ *
+ * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
+ * in low power state which would save some power.
+ */
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ufshcd_setup_vreg(hba, false);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ if (!ufshcd_is_link_active(hba)) {
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+ }
+ }
+}
+
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ret = ufshcd_setup_vreg(hba, true);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+ if (!ret && !ufshcd_is_link_active(hba)) {
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ if (ret)
+ goto vcc_disable;
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ if (ret)
+ goto vccq_lpm;
+ }
+ }
+ goto out;
+
+vccq_lpm:
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vcc_disable:
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+out:
+ return ret;
+}
+
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, false);
+}
+
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, true);
+}
+
/**
- * ufshcd_suspend - suspend power management function
+ * ufshcd_suspend - helper function for suspend operations
* @hba: per adapter instance
- * @state: power state
+ * @pm_op: desired low power operation type
+ *
+ * This function will try to put the UFS device and link into low power
+ * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
+ * (System PM level).
+ *
+ * If this function is called during shutdown, it will make sure that
+ * both the UFS device and the UFS link are powered off.
*
- * Returns -ENOSYS
+ * NOTE: UFS device & link must be active before we enter in this function.
+ *
+ * Returns 0 for success and non-zero for failure
*/
-int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
+static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
+ int ret = 0;
+ enum ufs_pm_level pm_lvl;
+ enum ufs_dev_pwr_mode req_dev_pwr_mode;
+ enum uic_link_state req_link_state;
+
+ hba->pm_op_in_progress = 1;
+ if (!ufshcd_is_shutdown_pm(pm_op)) {
+ pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->rpm_lvl : hba->spm_lvl;
+ req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
+ req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
+ } else {
+ req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
+ req_link_state = UIC_LINK_OFF_STATE;
+ }
+
/*
- * TODO:
- * 1. Block SCSI requests from SCSI midlayer
- * 2. Change the internal driver state to non operational
- * 3. Set UTRLRSR and UTMRLRSR bits to zero
- * 4. Wait until outstanding commands are completed
- * 5. Set HCE to zero to send the UFS host controller to reset state
+ * If we can't transition into any of the low power modes
+ * just gate the clocks.
*/
+ ufshcd_hold(hba, false);
+ hba->clk_gating.is_suspended = true;
+
+ if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+ req_link_state == UIC_LINK_ACTIVE_STATE) {
+ goto disable_clks;
+ }
+
+ if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
+ (req_link_state == hba->uic_link_state))
+ goto out;
+
+ /* UFS device & link must be active before we enter in this function */
+ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+ ret = -EINVAL;
+ goto out;
+ }
- return -ENOSYS;
+ if (ufshcd_is_runtime_pm(pm_op)) {
+ if (ufshcd_can_autobkops_during_suspend(hba)) {
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations if bkops status shows
+ * that performance might be impacted.
+ */
+ ret = ufshcd_urgent_bkops(hba);
+ if (ret)
+ goto enable_gating;
+ } else {
+ /* make sure that auto bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
+ }
+
+ if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
+ ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op))) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
+
+ ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+ if (ret)
+ goto set_dev_active;
+
+ ufshcd_vreg_set_lpm(hba);
+
+disable_clks:
+ /*
+ * Clock scaling needs access to the controller registers. Hence, wait
+ * for any pending clock scaling work to finish before the clocks are
+ * turned off.
+ */
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+ /*
+ * Call vendor specific suspend callback. As these callbacks may access
+ * vendor specific host controller register space, call them while the
+ * host clocks are still ON (i.e. before the clocks are turned off).
+ */
+ if (hba->vops && hba->vops->suspend) {
+ ret = hba->vops->suspend(hba, pm_op);
+ if (ret)
+ goto set_link_active;
+ }
+
+ if (hba->vops && hba->vops->setup_clocks) {
+ ret = hba->vops->setup_clocks(hba, false);
+ if (ret)
+ goto vops_resume;
+ }
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ hba->clk_gating.state = CLKS_OFF;
+ /*
+ * Disable the host irq, as there won't be any
+ * host controller transaction expected till resume.
+ */
+ ufshcd_disable_irq(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ goto out;
+
+vops_resume:
+ if (hba->vops && hba->vops->resume)
+ hba->vops->resume(hba, pm_op);
+set_link_active:
+ ufshcd_vreg_set_hpm(hba);
+ if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+ ufshcd_set_link_active(hba);
+ else if (ufshcd_is_link_off(hba))
+ ufshcd_host_reset_and_restore(hba);
+set_dev_active:
+ if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+ ufshcd_disable_auto_bkops(hba);
+enable_gating:
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
}
-EXPORT_SYMBOL_GPL(ufshcd_suspend);
/**
- * ufshcd_resume - resume power management function
+ * ufshcd_resume - helper function for resume operations
* @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state.
*
- * Returns -ENOSYS
+ * Returns 0 for success and non-zero for failure
*/
-int ufshcd_resume(struct ufs_hba *hba)
+static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
+ int ret;
+ enum uic_link_state old_link_state;
+
+ hba->pm_op_in_progress = 1;
+ old_link_state = hba->uic_link_state;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto out;
+
+ /* enable the host irq as host controller would be active soon */
+ ret = ufshcd_enable_irq(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ /*
+ * Call vendor specific resume callback. As these callbacks may access
+ * vendor specific host controller register space call them when the
+ * host clocks are ON.
+ */
+ if (hba->vops && hba->vops->resume) {
+ ret = hba->vops->resume(hba, pm_op);
+ if (ret)
+ goto disable_vreg;
+ }
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (!ret)
+ ufshcd_set_link_active(hba);
+ else
+ goto vendor_suspend;
+ } else if (ufshcd_is_link_off(hba)) {
+ ret = ufshcd_host_reset_and_restore(hba);
+ /*
+ * ufshcd_host_reset_and_restore() should have already
+ * set the link state as active
+ */
+ if (ret || !ufshcd_is_link_active(hba))
+ goto vendor_suspend;
+ }
+
+ if (!ufshcd_is_ufs_dev_active(hba)) {
+ ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+ if (ret)
+ goto set_old_link_state;
+ }
+
/*
- * TODO:
- * 1. Set HCE to 1, to start the UFS host controller
- * initialization process
- * 2. Set UTRLRSR and UTMRLRSR bits to 1
- * 3. Change the internal driver state to operational
- * 4. Unblock SCSI requests from SCSI midlayer
+ * If BKOPs operations are urgently needed at this moment then
+ * keep auto-bkops enabled or else disable it.
*/
+ ufshcd_urgent_bkops(hba);
+ hba->clk_gating.is_suspended = false;
+
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+ goto out;
+
+set_old_link_state:
+ ufshcd_link_state_transition(hba, old_link_state, 0);
+vendor_suspend:
+ if (hba->vops && hba->vops->suspend)
+ hba->vops->suspend(hba, pm_op);
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
+disable_irq_and_vops_clks:
+ ufshcd_disable_irq(hba);
+ ufshcd_setup_clocks(hba, false);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_system_suspend - system suspend routine
+ * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba || !hba->is_powered)
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ if (hba->rpm_lvl == hba->spm_lvl)
+ /*
+ * There is a possibility that the device may still be in
+ * active state during runtime suspend.
+ */
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * The UFS device and/or UFS link low power states during runtime
+ * suspend seem to be different from what is expected during
+ * system suspend. Hence runtime resume the device & link and
+ * let the system suspend low power states take effect.
+ * TODO: If resume takes longer, we might optimize it in the
+ * future by not resuming everything if possible.
+ */
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+ if (!ret)
+ hba->is_sys_suspended = true;
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
+
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+ if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ /*
+ * Let the runtime resume take care of resuming
+ * if runtime suspended.
+ */
+ return 0;
- return -ENOSYS;
+ return ufshcd_resume(hba, UFS_SYSTEM_PM);
}
-EXPORT_SYMBOL_GPL(ufshcd_resume);
+EXPORT_SYMBOL(ufshcd_system_resume);
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- if (!hba)
+ if (!hba || !hba->is_powered)
return 0;
- /*
- * The device is idle with no requests in the queue,
- * allow background operations.
- */
- return ufshcd_enable_auto_bkops(hba);
+ return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state. Following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
+ * to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * So the following would be the possible power state after this function
+ * returns successfully:
+ * S1: UFS device in Active state with VCC rail ON
+ * UniPro link in Active state
+ * All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- if (!hba)
+ if (!hba || !hba->is_powered)
return 0;
-
- return ufshcd_disable_auto_bkops(hba);
+ else
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -3245,6 +5194,36 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
EXPORT_SYMBOL(ufshcd_runtime_idle);
/**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function would power off both UFS device and UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */
+int ufshcd_shutdown(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+ if (ret)
+ dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ /* allow force shutdown even in case of errors */
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
+
+/**
* ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
* @hba - per adapter instance
@@ -3252,11 +5231,17 @@ EXPORT_SYMBOL(ufshcd_runtime_idle);
void ufshcd_remove(struct ufs_hba *hba)
{
scsi_remove_host(hba->host);
+ ufshcd_scsi_remove_wlus(hba);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
scsi_host_put(hba->host);
+
+ ufshcd_exit_clk_gating(hba);
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_remove_device(hba->devfreq);
+ ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -3277,19 +5262,16 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
}
/**
- * ufshcd_init - Driver initialization routine
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
- * @mmio_base: base register address
- * @irq: Interrupt line of device
* Returns 0 on success, non-zero value on failure
*/
-int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
- void __iomem *mmio_base, unsigned int irq)
+int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
- int err;
+ int err = 0;
if (!dev) {
dev_err(dev,
@@ -3298,13 +5280,6 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
goto out_error;
}
- if (!mmio_base) {
- dev_err(dev,
- "Invalid memory reference for mmio_base is NULL\n");
- err = -ENODEV;
- goto out_error;
- }
-
host = scsi_host_alloc(&ufshcd_driver_template,
sizeof(struct ufs_hba));
if (!host) {
@@ -3315,9 +5290,146 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
+ *hba_handle = hba;
+
+out_error:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_alloc_host);
+
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ break;
+ }
+ clki->curr_freq = clki->max_freq;
+
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+ ret = clk_set_rate(clki->clk, clki->min_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->min_freq, ret);
+ break;
+ }
+ clki->curr_freq = clki->min_freq;
+ }
+ }
+ dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+ if (hba->vops && hba->vops->clk_scale_notify)
+ hba->vops->clk_scale_notify(hba);
+out:
+ return ret;
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ int err = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return -EINVAL;
+
+ if (*freq == UINT_MAX)
+ err = ufshcd_scale_clks(hba, true);
+ else if (*freq == 0)
+ err = ufshcd_scale_clks(hba, false);
+
+ return err;
+}
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return -EINVAL;
+
+ memset(stat, 0, sizeof(*stat));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!scaling->window_start_t)
+ goto start_window;
+
+ if (scaling->is_busy_started)
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+
+ stat->total_time = jiffies_to_usecs((long)jiffies -
+ (long)scaling->window_start_t);
+ stat->busy_time = scaling->tot_busy_t;
+start_window:
+ scaling->window_start_t = jiffies;
+ scaling->tot_busy_t = 0;
+
+ if (hba->outstanding_reqs) {
+ scaling->busy_start_t = ktime_get();
+ scaling->is_busy_started = true;
+ } else {
+ scaling->busy_start_t = ktime_set(0, 0);
+ scaling->is_busy_started = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 100,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
+
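ufshcd_devfreq_get_dev_status() reports busy_time and total_time in microseconds; the governor selected at registration time (simple_ondemand, see ufshcd_init() below) turns that into a load figure and then calls back into ufshcd_devfreq_target() with either 0 or UINT_MAX. A rough sketch of the load computation performed on the governor side is shown below; the helper name is made up and the exact thresholds used by simple_ondemand are not part of this driver.

/* Illustration only: how busy_time/total_time reported above translate
 * into a load percentage on the governor side. */
static unsigned int ufs_example_load_percent(struct devfreq_dev_status *stat)
{
	if (!stat->total_time)
		return 0;
	return (unsigned int)div64_u64((u64)stat->busy_time * 100,
				       stat->total_time);
}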
+/**
+ * ufshcd_init - Driver initialization routine
+ * @hba: per-adapter instance
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+{
+ int err;
+ struct Scsi_Host *host = hba->host;
+ struct device *dev = hba->dev;
+
+ if (!mmio_base) {
+ dev_err(hba->dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
hba->mmio_base = mmio_base;
hba->irq = irq;
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
+
/* Read capabilities registers */
ufshcd_hba_capabilities(hba);
@@ -3346,11 +5458,13 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
host->can_queue = hba->nutrs;
host->cmd_per_lun = hba->nutrs;
host->max_id = UFSHCD_MAX_ID;
- host->max_lun = UFSHCD_MAX_LUNS;
+ host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
host->max_cmd_len = MAX_CDB_SIZE;
+ hba->max_pwr_info.is_valid = false;
+
/* Initialize wait queue for task management */
init_waitqueue_head(&hba->tm_wq);
init_waitqueue_head(&hba->tm_tag_wq);
@@ -3368,24 +5482,27 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
/* Initialize device management tag acquire wait queue */
init_waitqueue_head(&hba->dev_cmd.tag_wq);
+ ufshcd_init_clk_gating(hba);
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
- goto out_disable;
+ goto exit_gating;
+ } else {
+ hba->is_irq_enabled = true;
}
/* Enable SCSI tag mapping */
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
dev_err(hba->dev, "init shared queue failed\n");
- goto out_disable;
+ goto exit_gating;
}
err = scsi_add_host(host, hba->dev);
if (err) {
dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
+ goto exit_gating;
}
/* Host controller enable */
@@ -3395,19 +5512,40 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
goto out_remove_scsi_host;
}
- *hba_handle = hba;
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+ "simple_ondemand", NULL);
+ if (IS_ERR(hba->devfreq)) {
+ dev_err(hba->dev, "Unable to register with devfreq %ld\n",
+ PTR_ERR(hba->devfreq));
+ goto out_remove_scsi_host;
+ }
+ /* Suspend devfreq until the UFS device is detected */
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
+ /*
+ * The device-initialize-sequence hasn't been invoked yet.
+ * Set the device to power-off state
+ */
+ ufshcd_set_ufs_dev_poweroff(hba);
+
async_schedule(ufshcd_async_scan, hba);
return 0;
out_remove_scsi_host:
scsi_remove_host(hba->host);
+exit_gating:
+ ufshcd_exit_clk_gating(hba);
out_disable:
+ hba->is_irq_enabled = false;
scsi_host_put(host);
+ ufshcd_hba_exit(hba);
out_error:
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index acf318e338ed..58ecdff5065c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -52,6 +52,7 @@
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
@@ -68,6 +69,8 @@
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"
+struct ufs_hba;
+
enum dev_cmd_type {
DEV_CMD_TYPE_NOP = 0x0,
DEV_CMD_TYPE_QUERY = 0x1,
@@ -93,6 +96,54 @@ struct uic_command {
struct completion done;
};
+/* Used to differentiate the power management options */
+enum ufs_pm_op {
+ UFS_RUNTIME_PM,
+ UFS_SYSTEM_PM,
+ UFS_SHUTDOWN_PM,
+};
+
+#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
+#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
+#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
+
+/* Host <-> Device UniPro Link state */
+enum uic_link_state {
+ UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
+ UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
+ UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
+};
+
+#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
+#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
+ UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
+#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
+ UIC_LINK_HIBERN8_STATE)
+
+/*
+ * UFS Power management levels.
+ * Each level is in increasing order of power savings.
+ */
+enum ufs_pm_level {
+ UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+ UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+ UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+ UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+ UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+ UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
+ UFS_PM_LVL_MAX
+};
+
+struct ufs_pm_lvl_states {
+ enum ufs_dev_pwr_mode dev_state;
+ enum uic_link_state link_state;
+};
+
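The comments on the UFS_PM_LVL_* values above define the full (device power mode, link state) mapping; the ufs_get_pm_lvl_to_dev_pwr_mode()/ufs_get_pm_lvl_to_link_pwr_state() lookups used by ufshcd_suspend() can be backed by a table indexed by ufs_pm_level, sketched below. The array name is an assumption; the pairings come straight from the enum comments.

/* Sketch only: pairings taken from the UFS_PM_LVL_* comments above. */
static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},	  /* UFS_PM_LVL_0 */
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},	  /* UFS_PM_LVL_1 */
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},	  /* UFS_PM_LVL_2 */
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},	  /* UFS_PM_LVL_3 */
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, /* UFS_PM_LVL_4 */
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},	  /* UFS_PM_LVL_5 */
};

#define ufs_get_pm_lvl_to_dev_pwr_mode(lvl) (ufs_pm_lvl_states[lvl].dev_state)
#define ufs_get_pm_lvl_to_link_pwr_state(lvl) (ufs_pm_lvl_states[lvl].link_state)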
/**
* struct ufshcd_lrb - local reference block
* @utr_descriptor_ptr: UTRD address of the command
@@ -121,7 +172,7 @@ struct ufshcd_lrb {
int command_type;
int task_tag;
- unsigned int lun;
+ u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
};
@@ -153,6 +204,126 @@ struct ufs_dev_cmd {
};
/**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+ * @clk: clock node
+ * @name: clock name
+ * @max_freq: maximum frequency supported by the clock
+ * @min_freq: min frequency that can be used for clock scaling
+ * @curr_freq: indicates the current frequency that it is set to
+ * @enabled: variable to check against multiple enable/disable
+ */
+struct ufs_clk_info {
+ struct list_head list;
+ struct clk *clk;
+ const char *name;
+ u32 max_freq;
+ u32 min_freq;
+ u32 curr_freq;
+ bool enabled;
+};
+
+#define PRE_CHANGE 0
+#define POST_CHANGE 1
+
+struct ufs_pa_layer_attr {
+ u32 gear_rx;
+ u32 gear_tx;
+ u32 lane_rx;
+ u32 lane_tx;
+ u32 pwr_rx;
+ u32 pwr_tx;
+ u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+ bool is_valid;
+ struct ufs_pa_layer_attr info;
+};
+
+/**
+ * struct ufs_hba_variant_ops - variant specific callbacks
+ * @name: variant name
+ * @init: called when the driver is initialized
+ * @exit: called to cleanup everything done in init
+ * @clk_scale_notify: notifies that clks are scaled up/down
+ * @setup_clocks: called before touching any of the controller registers
+ * @setup_regulators: called before accessing the host controller
+ * @hce_enable_notify: called before and after HCE enable bit is set to allow
+ * variant specific Uni-Pro initialization.
+ * @link_startup_notify: called before and after Link startup is carried out
+ * to allow variant specific Uni-Pro initialization.
+ * @pwr_change_notify: called before and after a power mode change
+ * is carried out to allow vendor specific capabilities
+ * to be set.
+ * @suspend: called during host controller PM callback
+ * @resume: called during host controller PM callback
+ */
+struct ufs_hba_variant_ops {
+ const char *name;
+ int (*init)(struct ufs_hba *);
+ void (*exit)(struct ufs_hba *);
+ void (*clk_scale_notify)(struct ufs_hba *);
+ int (*setup_clocks)(struct ufs_hba *, bool);
+ int (*setup_regulators)(struct ufs_hba *, bool);
+ int (*hce_enable_notify)(struct ufs_hba *, bool);
+ int (*link_startup_notify)(struct ufs_hba *, bool);
+ int (*pwr_change_notify)(struct ufs_hba *,
+ bool, struct ufs_pa_layer_attr *,
+ struct ufs_pa_layer_attr *);
+ int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
+ int (*resume)(struct ufs_hba *, enum ufs_pm_op);
+};
+
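To show how the variant hooks above are meant to be consumed, here is a minimal, hypothetical vendor glue instance. Only the struct and its field names come from this patch; the example function names and bodies are made up, and a real vendor driver would typically also implement exit, setup_regulators and the notify callbacks.

/* Hypothetical vendor glue, for illustration only. */
static int example_ufs_init(struct ufs_hba *hba)
{
	/* map vendor registers, stash private data in hba->priv, etc. */
	return 0;
}

static int example_ufs_setup_clocks(struct ufs_hba *hba, bool on)
{
	/* gate/ungate vendor-specific clocks around the core clock changes */
	return 0;
}

static struct ufs_hba_variant_ops example_ufs_hba_vops = {
	.name		= "example-ufs",
	.init		= example_ufs_init,
	.setup_clocks	= example_ufs_setup_clocks,
};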
+/* clock gating state */
+enum clk_gating_state {
+ CLKS_OFF,
+ CLKS_ON,
+ REQ_CLKS_OFF,
+ REQ_CLKS_ON,
+};
+
+/**
+ * struct ufs_clk_gating - UFS clock gating related info
+ * @gate_work: worker to turn off clocks after some delay as specified in
+ * delay_ms
+ * @ungate_work: worker to turn on clocks; used when clocks need to be
+ * ungated from interrupt (atomic) context
+ * @state: the current clocks state
+ * @delay_ms: gating delay in ms
+ * @is_suspended: clk gating is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before gating clocks.
+ */
+struct ufs_clk_gating {
+ struct delayed_work gate_work;
+ struct work_struct ungate_work;
+ enum clk_gating_state state;
+ unsigned long delay_ms;
+ bool is_suspended;
+ struct device_attribute delay_attr;
+ int active_reqs;
+};
+
+struct ufs_clk_scaling {
+ ktime_t busy_start_t;
+ bool is_busy_started;
+ unsigned long tot_busy_t;
+ unsigned long window_start_t;
+};
+
+/**
+ * struct ufs_init_prefetch - contains data that is pre-fetched once during
+ * initialization
+ * @icc_level: icc level which was read during initialization
+ */
+struct ufs_init_prefetch {
+ u32 icc_level;
+};
+
+/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
* @ucdl_base_addr: UFS Command Descriptor base address
@@ -171,6 +342,8 @@ struct ufs_dev_cmd {
* @nutrs: Transfer Request Queue depth supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @ufs_version: UFS Version to which controller complies
+ * @vops: pointer to variant specific operations
+ * @priv: pointer to variant specific private data
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command
@@ -183,6 +356,9 @@ struct ufs_dev_cmd {
* @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
+ * @is_powered: flag to check if HBA is powered
+ * @is_init_prefetch: flag to check if data was pre-fetched in initialization
+ * @init_prefetch_data: data pre-fetched during initialization
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
@@ -191,6 +367,10 @@ struct ufs_dev_cmd {
* @saved_uic_err: sticky UIC error mask
* @dev_cmd: ufs device management command information
* @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @vreg_info: UFS device voltage regulator information
+ * @clk_list_head: UFS host controller clocks list node head
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device's max valid power mode
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -207,6 +387,21 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
+ /*
+ * This field is to keep a reference to "scsi_device" corresponding to
+ * "UFS device" W-LU.
+ */
+ struct scsi_device *sdev_ufs_device;
+ struct scsi_device *sdev_rpmb;
+ struct scsi_device *sdev_boot;
+
+ enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+ enum uic_link_state uic_link_state;
+ /* Desired UFS power management level during runtime PM */
+ enum ufs_pm_level rpm_lvl;
+ /* Desired UFS power management level during system PM */
+ enum ufs_pm_level spm_lvl;
+ int pm_op_in_progress;
struct ufshcd_lrb *lrb;
unsigned long lrb_in_use;
@@ -218,22 +413,28 @@ struct ufs_hba {
int nutrs;
int nutmrs;
u32 ufs_version;
+ struct ufs_hba_variant_ops *vops;
+ void *priv;
unsigned int irq;
+ bool is_irq_enabled;
- struct uic_command *active_uic_cmd;
- struct mutex uic_cmd_mutex;
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
unsigned long tm_condition;
unsigned long tm_slots_in_use;
- struct completion *pwr_done;
+ struct uic_command *active_uic_cmd;
+ struct mutex uic_cmd_mutex;
+ struct completion *uic_async_done;
u32 ufshcd_state;
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
+ bool is_powered;
+ bool is_init_prefetch;
+ struct ufs_init_prefetch init_prefetch_data;
/* Work Queues */
struct work_struct eh_work;
@@ -248,16 +449,76 @@ struct ufs_hba {
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
+ /* Keeps information of the UFS device connected to this host */
+ struct ufs_dev_info dev_info;
bool auto_bkops_enabled;
+ struct ufs_vreg_info vreg_info;
+ struct list_head clk_list_head;
+
+ bool wlun_dev_clr_ua;
+
+ struct ufs_pa_layer_attr pwr_info;
+ struct ufs_pwr_mode_info max_pwr_info;
+
+ struct ufs_clk_gating clk_gating;
+ /* Control to enable/disable host capabilities */
+ u32 caps;
+ /* Allow dynamic clk gating */
+#define UFSHCD_CAP_CLK_GATING (1 << 0)
+ /* Allow hibern8 with clk gating */
+#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
+ /* Allow dynamic clk scaling */
+#define UFSHCD_CAP_CLK_SCALING (1 << 2)
+ /* Allow auto bkops to be enabled during runtime suspend */
+#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
+
+ struct devfreq *devfreq;
+ struct ufs_clk_scaling clk_scaling;
+ bool is_sys_suspended;
};
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
+static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_SCALING;
+}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
readl((hba)->mmio_base + (reg))
-int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * ,
- unsigned int);
+/**
+ * ufshcd_rmwl - read modify write into a register
+ * @hba: per adapter instance
+ * @mask: mask to apply on read value
+ * @val: actual value to write
+ * @reg: register address
+ */
+static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+{
+ u32 tmp;
+
+ tmp = ufshcd_readl(hba, reg);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ ufshcd_writel(hba, tmp, reg);
+}
+
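A short usage sketch of ufshcd_rmwl(): update only a masked field of a host register, leaving the other bits as they are. The register name and field layout below are made up for illustration.

/* Illustration only: REG_UFS_EXAMPLE_CTRL and the 4-bit field are made up. */
static void ufs_example_set_mode_field(struct ufs_hba *hba)
{
	/* program bits 3:0 to 0x5 without touching the remaining bits */
	ufshcd_rmwl(hba, 0xf, 0x5, REG_UFS_EXAMPLE_CTRL);
}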
+int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
void ufshcd_remove(struct ufs_hba *);
/**
@@ -275,11 +536,12 @@ static inline void check_upiu_size(void)
GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
-extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
-extern int ufshcd_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_system_suspend(struct ufs_hba *hba);
+extern int ufshcd_system_resume(struct ufs_hba *hba);
+extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
@@ -331,4 +593,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
#endif /* End of Header */
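The ufshcd_hold()/ufshcd_release() pair exported just above is the contract the ufshcd.c changes in this patch rely on (error handler, task management, abort, suspend/resume): take a reference before touching the controller so gated clocks are brought back on, and drop it when done so they can be gated again. A minimal, hypothetical caller is sketched below.

/* Illustration only: a hypothetical code path that needs register access. */
static void ufs_example_do_register_work(struct ufs_hba *hba)
{
	ufshcd_hold(hba, false);	/* synchronous: waits until clocks are ungated */

	/* ... access controller registers here ... */

	ufshcd_release(hba);		/* allow the clocks to be gated again */
}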
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index e1b844bc9460..d5721199e9cc 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -124,8 +124,11 @@ enum {
#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
-#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\
- UIC_POWER_MODE)
+#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
+ UIC_HIBERNATE_EXIT |\
+ UIC_POWER_MODE)
+
+#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
#define UFSHCD_ERROR_MASK (UIC_ERROR |\
DEVICE_FATAL_ERROR |\
@@ -210,7 +213,7 @@ enum {
#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
/* UIC Commands */
-enum {
+enum uic_cmd_dme {
UIC_CMD_DME_GET = 0x01,
UIC_CMD_DME_SET = 0x02,
UIC_CMD_DME_PEER_GET = 0x03,
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 0bb8041c047a..3fc3e21b746b 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -13,6 +13,44 @@
#define _UNIPRO_H_
/*
+ * M-TX Configuration Attributes
+ */
+#define TX_MODE 0x0021
+#define TX_HSRATE_SERIES 0x0022
+#define TX_HSGEAR 0x0023
+#define TX_PWMGEAR 0x0024
+#define TX_AMPLITUDE 0x0025
+#define TX_HS_SLEWRATE 0x0026
+#define TX_SYNC_SOURCE 0x0027
+#define TX_HS_SYNC_LENGTH 0x0028
+#define TX_HS_PREPARE_LENGTH 0x0029
+#define TX_LS_PREPARE_LENGTH 0x002A
+#define TX_HIBERN8_CONTROL 0x002B
+#define TX_LCC_ENABLE 0x002C
+#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D
+#define TX_BYPASS_8B10B_ENABLE 0x002E
+#define TX_DRIVER_POLARITY 0x002F
+#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030
+#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031
+#define TX_LCC_SEQUENCER 0x0032
+#define TX_MIN_ACTIVATETIME 0x0033
+#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
+
+/*
+ * M-RX Configuration Attributes
+ */
+#define RX_MODE 0x00A1
+#define RX_HSRATE_SERIES 0x00A2
+#define RX_HSGEAR 0x00A3
+#define RX_PWMGEAR 0x00A4
+#define RX_LS_TERMINATED_ENABLE 0x00A5
+#define RX_HS_UNTERMINATED_ENABLE 0x00A6
+#define RX_ENTER_HIBERN8 0x00A7
+#define RX_BYPASS_8B10B_ENABLE 0x00A8
+#define RX_TERMINATION_FORCE_ENABLE 0x0089
+
+#define is_mphy_tx_attr(attr) (attr < RX_MODE)
+/*
 * PHY Adapter attributes
*/
#define PA_ACTIVETXDATALANES 0x1560
@@ -87,6 +125,24 @@ enum {
PA_HS_MODE_B = 2,
};
+enum ufs_pwm_gear_tag {
+ UFS_PWM_DONT_CHANGE, /* Don't change Gear */
+ UFS_PWM_G1, /* PWM Gear 1 (default for reset) */
+ UFS_PWM_G2, /* PWM Gear 2 */
+ UFS_PWM_G3, /* PWM Gear 3 */
+ UFS_PWM_G4, /* PWM Gear 4 */
+ UFS_PWM_G5, /* PWM Gear 5 */
+ UFS_PWM_G6, /* PWM Gear 6 */
+ UFS_PWM_G7, /* PWM Gear 7 */
+};
+
+enum ufs_hs_gear_tag {
+ UFS_HS_DONT_CHANGE, /* Don't change Gear */
+ UFS_HS_G1, /* HS Gear 1 (default for reset) */
+ UFS_HS_G2, /* HS Gear 2 */
+ UFS_HS_G3, /* HS Gear 3 */
+};
+
/*
* Data Link Layer Attributes
*/
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 447458e696a9..7e1f120f2b32 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -22,44 +22,63 @@
#define GSBI_CTRL_REG 0x0000
#define GSBI_PROTOCOL_SHIFT 4
+struct gsbi_info {
+ struct clk *hclk;
+ u32 mode;
+ u32 crci;
+};
+
static int gsbi_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct resource *res;
void __iomem *base;
- struct clk *hclk;
- u32 mode, crci = 0;
+ struct gsbi_info *gsbi;
+
+ gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+ if (!gsbi)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
- if (of_property_read_u32(node, "qcom,mode", &mode)) {
+ if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
dev_err(&pdev->dev, "missing mode configuration\n");
return -EINVAL;
}
/* not required, so default to 0 if not present */
- of_property_read_u32(node, "qcom,crci", &crci);
+ of_property_read_u32(node, "qcom,crci", &gsbi->crci);
- dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci);
+ dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+ gsbi->mode, gsbi->crci);
+ gsbi->hclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(gsbi->hclk))
+ return PTR_ERR(gsbi->hclk);
- hclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(hclk))
- return PTR_ERR(hclk);
+ clk_prepare_enable(gsbi->hclk);
- clk_prepare_enable(hclk);
-
- writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci,
+ writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
base + GSBI_CTRL_REG);
/* make sure the gsbi control write is not reordered */
wmb();
- clk_disable_unprepare(hclk);
+ platform_set_drvdata(pdev, gsbi);
+
+ return of_platform_populate(node, NULL, NULL, &pdev->dev);
+}
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+ struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(gsbi->hclk);
- return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ return 0;
}
static const struct of_device_id gsbi_dt_match[] = {
@@ -76,6 +95,7 @@ static struct platform_driver gsbi_driver = {
.of_match_table = gsbi_dt_match,
},
.probe = gsbi_probe,
+ .remove = gsbi_remove,
};
module_platform_driver(gsbi_driver);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 62e2242ad7e0..84e7c9e6ccef 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -69,6 +69,7 @@ config SPI_ATH79
config SPI_ATMEL
tristate "Atmel SPI Controller"
+ depends on HAS_DMA
depends on (ARCH_AT91 || AVR32 || COMPILE_TEST)
help
This selects a driver for the Atmel SPI Controller, present on
@@ -112,6 +113,14 @@ config SPI_AU1550
If you say yes to this option, support will be included for the
PSC SPI controller found on Au1550, Au1200 and Au1300 series.
+config SPI_BCM53XX
+ tristate "Broadcom BCM53xx SPI controller"
+ depends on ARCH_BCM_5301X
+ depends on BCMA_POSSIBLE
+ select BCMA
+ help
+ Enable support for the SPI controller on Broadcom BCM53xx ARM SoCs.
+
config SPI_BCM63XX
tristate "Broadcom BCM63xx SPI controller"
depends on BCM63XX
@@ -185,6 +194,7 @@ config SPI_EFM32
config SPI_EP93XX
tristate "Cirrus Logic EP93xx SPI controller"
+ depends on HAS_DMA
depends on ARCH_EP93XX || COMPILE_TEST
help
This enables using the Cirrus EP93xx SPI controller in master
@@ -314,6 +324,7 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
+ depends on HAS_DMA
depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
@@ -380,7 +391,7 @@ config SPI_PXA2XX
additional documentation can be found at Documentation/spi/pxa2xx.
config SPI_PXA2XX_PCI
- def_tristate SPI_PXA2XX && PCI
+ def_tristate SPI_PXA2XX && PCI && COMMON_CLK
config SPI_ROCKCHIP
tristate "Rockchip SPI controller driver"
@@ -500,7 +511,7 @@ config SPI_MXS
config SPI_TEGRA114
tristate "NVIDIA Tegra114 SPI Controller"
depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
- depends on RESET_CONTROLLER
+ depends on RESET_CONTROLLER && HAS_DMA
help
SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller
is different than the older SoCs SPI controller and also register interface
@@ -518,7 +529,7 @@ config SPI_TEGRA20_SFLASH
config SPI_TEGRA20_SLINK
tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
- depends on RESET_CONTROLLER
+ depends on RESET_CONTROLLER && HAS_DMA
help
SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
@@ -591,7 +602,7 @@ config SPI_DW_PCI
depends on SPI_DESIGNWARE && PCI
config SPI_DW_MID_DMA
- bool "DMA support for DW SPI controller on Intel Moorestown platform"
+ bool "DMA support for DW SPI controller on Intel MID platform"
depends on SPI_DW_PCI && INTEL_MID_DMAC
config SPI_DW_MMIO
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 762da0741148..78f24ca36fcf 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
+obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
new file mode 100644
index 000000000000..17b34cbadc03
--- /dev/null
+++ b/drivers/spi/spi-bcm53xx.c
@@ -0,0 +1,299 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/bcma/bcma.h>
+#include <linux/spi/spi.h>
+
+#include "spi-bcm53xx.h"
+
+#define BCM53XXSPI_MAX_SPI_BAUD 13500000 /* 216 MHz? */
+
+/* The longest observed required wait was 19 ms */
+#define BCM53XXSPI_SPE_TIMEOUT_MS 80
+
+struct bcm53xxspi {
+ struct bcma_device *core;
+ struct spi_master *master;
+
+ size_t read_offset;
+};
+
+static inline u32 bcm53xxspi_read(struct bcm53xxspi *b53spi, u16 offset)
+{
+ return bcma_read32(b53spi->core, offset);
+}
+
+static inline void bcm53xxspi_write(struct bcm53xxspi *b53spi, u16 offset,
+ u32 value)
+{
+ bcma_write32(b53spi->core, offset, value);
+}
+
+static inline unsigned int bcm53xxspi_calc_timeout(size_t len)
+{
+ /* Do some magic calculation based on length and baud. Add 10% and 1. */
+ return (len * 9000 / BCM53XXSPI_MAX_SPI_BAUD * 110 / 100) + 1;
+}
+
+static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms)
+{
+ unsigned long deadline;
+ u32 tmp;
+
+ /* SPE bit has to be 0 before we read MSPI STATUS */
+ deadline = jiffies + BCM53XXSPI_SPE_TIMEOUT_MS * HZ / 1000;
+ do {
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
+ if (!(tmp & B53SPI_MSPI_SPCR2_SPE))
+ break;
+ udelay(5);
+ } while (!time_after_eq(jiffies, deadline));
+
+ if (tmp & B53SPI_MSPI_SPCR2_SPE)
+ goto spi_timeout;
+
+ /* Check status */
+ deadline = jiffies + timeout_ms * HZ / 1000;
+ do {
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS);
+ if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) {
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0);
+ return 0;
+ }
+
+ cpu_relax();
+ udelay(100);
+ } while (!time_after_eq(jiffies, deadline));
+
+spi_timeout:
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0);
+
+ pr_err("Timeout waiting for SPI to be ready!\n");
+
+ return -EBUSY;
+}
+
+static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
+ size_t len, bool cont)
+{
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ /* Transmit Register File MSB */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_TXRAM + 4 * (i * 2),
+ (unsigned int)w_buf[i]);
+ }
+
+ for (i = 0; i < len; i++) {
+ tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
+ B53SPI_CDRAM_PCS_DSCK;
+ if (!cont && i == len - 1)
+ tmp &= ~B53SPI_CDRAM_CONT;
+ tmp &= ~0x1;
+ /* Command Register File */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp);
+ }
+
+ /* Set queue pointers */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
+
+ if (cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
+
+ /* Start SPI transfer */
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
+ tmp |= B53SPI_MSPI_SPCR2_SPE;
+ if (cont)
+ tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD;
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp);
+
+ /* Wait for SPI to finish */
+ bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len));
+
+ if (!cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
+
+ b53spi->read_offset = len;
+}
+
+static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
+ size_t len, bool cont)
+{
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < b53spi->read_offset + len; i++) {
+ tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
+ B53SPI_CDRAM_PCS_DSCK;
+ if (!cont && i == b53spi->read_offset + len - 1)
+ tmp &= ~B53SPI_CDRAM_CONT;
+ tmp &= ~0x1;
+ /* Command Register File */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp);
+ }
+
+ /* Set queue pointers */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
+ b53spi->read_offset + len - 1);
+
+ if (cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
+
+ /* Start SPI transfer */
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
+ tmp |= B53SPI_MSPI_SPCR2_SPE;
+ if (cont)
+ tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD;
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp);
+
+ /* Wait for SPI to finish */
+ bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len));
+
+ if (!cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
+
+ for (i = 0; i < len; ++i) {
+ int offset = b53spi->read_offset + i;
+
+ /* Data stored in the receive register file LSB */
+ r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
+ }
+
+ b53spi->read_offset = 0;
+}
+
+static int bcm53xxspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm53xxspi *b53spi = spi_master_get_devdata(master);
+ u8 *buf;
+ size_t left;
+
+ if (t->tx_buf) {
+ buf = (u8 *)t->tx_buf;
+ left = t->len;
+ while (left) {
+ size_t to_write = min_t(size_t, 16, left);
+ bool cont = left - to_write > 0;
+
+ bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
+ left -= to_write;
+ buf += to_write;
+ }
+ }
+
+ if (t->rx_buf) {
+ buf = (u8 *)t->rx_buf;
+ left = t->len;
+ while (left) {
+ size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
+ left);
+ bool cont = left - to_read > 0;
+
+ bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
+ left -= to_read;
+ buf += to_read;
+ }
+ }
+
+ return 0;
+}
+
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static struct spi_board_info bcm53xx_info = {
+ .modalias = "bcm53xxspiflash",
+};
+
+static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl);
+
+static int bcm53xxspi_bcma_probe(struct bcma_device *core)
+{
+ struct bcm53xxspi *b53spi;
+ struct spi_master *master;
+ int err;
+
+ if (core->bus->drv_cc.core->id.rev != 42) {
+ pr_err("SPI on SoC with unsupported ChipCommon rev\n");
+ return -ENOTSUPP;
+ }
+
+ master = spi_alloc_master(&core->dev, sizeof(*b53spi));
+ if (!master)
+ return -ENOMEM;
+
+ b53spi = spi_master_get_devdata(master);
+ b53spi->master = master;
+ b53spi->core = core;
+
+ master->transfer_one = bcm53xxspi_transfer_one;
+
+ bcma_set_drvdata(core, b53spi);
+
+ err = devm_spi_register_master(&core->dev, master);
+ if (err) {
+ spi_master_put(master);
+ bcma_set_drvdata(core, NULL);
+ goto out;
+ }
+
+ /* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */
+ spi_new_device(master, &bcm53xx_info);
+
+out:
+ return err;
+}
+
+static void bcm53xxspi_bcma_remove(struct bcma_device *core)
+{
+ struct bcm53xxspi *b53spi = bcma_get_drvdata(core);
+
+ spi_unregister_master(b53spi->master);
+}
+
+static struct bcma_driver bcm53xxspi_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bcm53xxspi_bcma_tbl,
+ .probe = bcm53xxspi_bcma_probe,
+ .remove = bcm53xxspi_bcma_remove,
+};
+
+/**************************************************
+ * Init & exit
+ **************************************************/
+
+static int __init bcm53xxspi_module_init(void)
+{
+ int err = 0;
+
+ err = bcma_driver_register(&bcm53xxspi_bcma_driver);
+ if (err)
+ pr_err("Failed to register bcma driver: %d\n", err);
+
+ return err;
+}
+
+static void __exit bcm53xxspi_module_exit(void)
+{
+ bcma_driver_unregister(&bcm53xxspi_bcma_driver);
+}
+
+module_init(bcm53xxspi_module_init);
+module_exit(bcm53xxspi_module_exit);
+
+MODULE_DESCRIPTION("Broadcom BCM53xx SPI Controller driver");
+MODULE_AUTHOR("Rafał Miłecki <zajec5@gmail.com>");
+MODULE_LICENSE("GPL");
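Worth noting for the timeout helper above: with the 16-byte chunks issued by bcm53xxspi_transfer_one(), the integer division in bcm53xxspi_calc_timeout() truncates to zero, so the computed MSPI status timeout is effectively always 1 ms (the separate 80 ms SPE wait still applies). A standalone sketch of the same arithmetic, for illustration only:

/* Userspace sketch mirroring bcm53xxspi_calc_timeout(); not kernel code. */
#include <stddef.h>
#include <stdio.h>

#define BCM53XXSPI_MAX_SPI_BAUD	13500000

static unsigned int calc_timeout(size_t len)
{
	/* Same left-to-right integer math as the driver helper. */
	return (len * 9000 / BCM53XXSPI_MAX_SPI_BAUD * 110 / 100) + 1;
}

int main(void)
{
	printf("16 bytes   -> %u ms\n", calc_timeout(16));   /* prints 1 */
	printf("1500 bytes -> %u ms\n", calc_timeout(1500)); /* prints 2 */
	return 0;
}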
diff --git a/drivers/spi/spi-bcm53xx.h b/drivers/spi/spi-bcm53xx.h
new file mode 100644
index 000000000000..73575dfe6916
--- /dev/null
+++ b/drivers/spi/spi-bcm53xx.h
@@ -0,0 +1,72 @@
+#ifndef SPI_BCM53XX_H
+#define SPI_BCM53XX_H
+
+#define B53SPI_BSPI_REVISION_ID 0x000
+#define B53SPI_BSPI_SCRATCH 0x004
+#define B53SPI_BSPI_MAST_N_BOOT_CTRL 0x008
+#define B53SPI_BSPI_BUSY_STATUS 0x00c
+#define B53SPI_BSPI_INTR_STATUS 0x010
+#define B53SPI_BSPI_B0_STATUS 0x014
+#define B53SPI_BSPI_B0_CTRL 0x018
+#define B53SPI_BSPI_B1_STATUS 0x01c
+#define B53SPI_BSPI_B1_CTRL 0x020
+#define B53SPI_BSPI_STRAP_OVERRIDE_CTRL 0x024
+#define B53SPI_BSPI_FLEX_MODE_ENABLE 0x028
+#define B53SPI_BSPI_BITS_PER_CYCLE 0x02c
+#define B53SPI_BSPI_BITS_PER_PHASE 0x030
+#define B53SPI_BSPI_CMD_AND_MODE_BYTE 0x034
+#define B53SPI_BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
+#define B53SPI_BSPI_BSPI_XOR_VALUE 0x03c
+#define B53SPI_BSPI_BSPI_XOR_ENABLE 0x040
+#define B53SPI_BSPI_BSPI_PIO_MODE_ENABLE 0x044
+#define B53SPI_BSPI_BSPI_PIO_IODIR 0x048
+#define B53SPI_BSPI_BSPI_PIO_DATA 0x04c
+
+/* RAF */
+#define B53SPI_RAF_START_ADDR 0x100
+#define B53SPI_RAF_NUM_WORDS 0x104
+#define B53SPI_RAF_CTRL 0x108
+#define B53SPI_RAF_FULLNESS 0x10c
+#define B53SPI_RAF_WATERMARK 0x110
+#define B53SPI_RAF_STATUS 0x114
+#define B53SPI_RAF_READ_DATA 0x118
+#define B53SPI_RAF_WORD_CNT 0x11c
+#define B53SPI_RAF_CURR_ADDR 0x120
+
+/* MSPI */
+#define B53SPI_MSPI_SPCR0_LSB 0x200
+#define B53SPI_MSPI_SPCR0_MSB 0x204
+#define B53SPI_MSPI_SPCR1_LSB 0x208
+#define B53SPI_MSPI_SPCR1_MSB 0x20c
+#define B53SPI_MSPI_NEWQP 0x210
+#define B53SPI_MSPI_ENDQP 0x214
+#define B53SPI_MSPI_SPCR2 0x218
+#define B53SPI_MSPI_SPCR2_SPE 0x00000040
+#define B53SPI_MSPI_SPCR2_CONT_AFTER_CMD 0x00000080
+#define B53SPI_MSPI_MSPI_STATUS 0x220
+#define B53SPI_MSPI_MSPI_STATUS_SPIF 0x00000001
+#define B53SPI_MSPI_CPTQP 0x224
+#define B53SPI_MSPI_TXRAM 0x240 /* 32 registers, up to 0x2b8 */
+#define B53SPI_MSPI_RXRAM 0x2c0 /* 32 registers, up to 0x33c */
+#define B53SPI_MSPI_CDRAM 0x340 /* 16 registers, up to 0x37c */
+#define B53SPI_CDRAM_PCS_PCS0 0x00000001
+#define B53SPI_CDRAM_PCS_PCS1 0x00000002
+#define B53SPI_CDRAM_PCS_PCS2 0x00000004
+#define B53SPI_CDRAM_PCS_PCS3 0x00000008
+#define B53SPI_CDRAM_PCS_DISABLE_ALL 0x0000000f
+#define B53SPI_CDRAM_PCS_DSCK 0x00000010
+#define B53SPI_CDRAM_BITSE 0x00000040
+#define B53SPI_CDRAM_CONT 0x00000080
+#define B53SPI_MSPI_WRITE_LOCK 0x380
+#define B53SPI_MSPI_DISABLE_FLUSH_GEN 0x384
+
+/* Interrupt */
+#define B53SPI_INTR_RAF_LR_FULLNESS_REACHED 0x3a0
+#define B53SPI_INTR_RAF_LR_TRUNCATED 0x3a4
+#define B53SPI_INTR_RAF_LR_IMPATIENT 0x3a8
+#define B53SPI_INTR_RAF_LR_SESSION_DONE 0x3ac
+#define B53SPI_INTR_RAF_LR_OVERREAD 0x3b0
+#define B53SPI_INTR_MSPI_DONE 0x3b4
+#define B53SPI_INTR_MSPI_HALT_SET_TRANSACTION_DONE 0x3b8
+
+#endif /* SPI_BCM53XX_H */
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 562ff83debd9..7b811e38c7ad 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -677,7 +677,6 @@ static struct platform_driver cdns_spi_driver = {
.remove = cdns_spi_remove,
.driver = {
.name = CDNS_SPI_NAME,
- .owner = THIS_MODULE,
.of_match_table = cdns_spi_of_match,
.pm = &cdns_spi_dev_pm_ops,
},
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index ce538dad526b..181cf2262006 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -30,7 +30,6 @@
struct spi_clps711x_data {
void __iomem *syncio;
struct regmap *syscon;
- struct regmap *syscon1;
struct clk *spi_clk;
u8 *tx_buf;
@@ -47,27 +46,6 @@ static int spi_clps711x_setup(struct spi_device *spi)
return 0;
}
-static void spi_clps711x_setup_xfer(struct spi_device *spi,
- struct spi_transfer *xfer)
-{
- struct spi_master *master = spi->master;
- struct spi_clps711x_data *hw = spi_master_get_devdata(master);
-
- /* Setup SPI frequency divider */
- if (xfer->speed_hz >= master->max_speed_hz)
- regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
- SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(3));
- else if (xfer->speed_hz >= (master->max_speed_hz / 2))
- regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
- SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(2));
- else if (xfer->speed_hz >= (master->max_speed_hz / 8))
- regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
- SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(1));
- else
- regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
- SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(0));
-}
-
static int spi_clps711x_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -87,7 +65,7 @@ static int spi_clps711x_transfer_one(struct spi_master *master,
struct spi_clps711x_data *hw = spi_master_get_devdata(master);
u8 data;
- spi_clps711x_setup_xfer(spi, xfer);
+ clk_set_rate(hw->spi_clk, xfer->speed_hz ? : spi->max_speed_hz);
hw->len = xfer->len;
hw->bpw = xfer->bits_per_word;
@@ -176,13 +154,11 @@ static int spi_clps711x_probe(struct platform_device *pdev)
}
}
- hw->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ hw->spi_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hw->spi_clk)) {
- dev_err(&pdev->dev, "Can't get clocks\n");
ret = PTR_ERR(hw->spi_clk);
goto err_out;
}
- master->max_speed_hz = clk_get_rate(hw->spi_clk);
hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3");
if (IS_ERR(hw->syscon)) {
@@ -190,12 +166,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
goto err_out;
}
- hw->syscon1 = syscon_regmap_lookup_by_pdevname("syscon.1");
- if (IS_ERR(hw->syscon1)) {
- ret = PTR_ERR(hw->syscon1);
- goto err_out;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hw->syncio = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hw->syncio)) {
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 134fb6eb7b19..63700ab7bd9f 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -65,6 +65,7 @@
/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK BIT(12)
+#define SPIDAT1_WDEL BIT(10)
/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK BIT(1)
@@ -167,8 +168,10 @@ static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
u32 data = 0;
+
if (dspi->tx) {
const u8 *tx = dspi->tx;
+
data = *tx++;
dspi->tx = tx;
}
@@ -178,8 +181,10 @@ static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
u32 data = 0;
+
if (dspi->tx) {
const u16 *tx = dspi->tx;
+
data = *tx++;
dspi->tx = tx;
}
@@ -209,6 +214,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
+ struct davinci_spi_config *spicfg = spi->controller_data;
u8 chip_sel = spi->chip_select;
u16 spidat1 = CS_DEFAULT;
bool gpio_chipsel = false;
@@ -223,6 +229,10 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
gpio = spi->cs_gpio;
}
+ /* program delay between transfers if wdelay is non-zero */
+ if (spicfg->wdelay)
+ spidat1 |= SPIDAT1_WDEL;
+
/*
* Board specific chip select logic decides the polarity and cs
* line for the controller
@@ -237,9 +247,9 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
spidat1 |= SPIDAT1_CSHOLD_MASK;
spidat1 &= ~(0x1 << chip_sel);
}
-
- iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}
+
+ iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}
/**
@@ -285,7 +295,7 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
int prescale;
dspi = spi_master_get_devdata(spi->master);
- spicfg = (struct davinci_spi_config *)spi->controller_data;
+ spicfg = spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
@@ -333,6 +343,14 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
spifmt |= SPIFMT_PHASE_MASK;
/*
+ * Assume wdelay is used only on SPI peripherals that have this field
+ * in the SPIFMTn register and when it is configured from the board file or DT.
+ */
+ if (spicfg->wdelay)
+ spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
+ & SPIFMT_WDELAY_MASK);
+
+ /*
* Version 1 hardware supports two basic SPI modes:
* - Standard SPI mode uses 4 pins, with chipselect
* - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
@@ -349,9 +367,6 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
u32 delay = 0;
- spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
- & SPIFMT_WDELAY_MASK);
-
if (spicfg->odd_parity)
spifmt |= SPIFMT_ODD_PARITY_MASK;
@@ -383,6 +398,26 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
return 0;
}
+static int davinci_spi_of_setup(struct spi_device *spi)
+{
+ struct davinci_spi_config *spicfg = spi->controller_data;
+ struct device_node *np = spi->dev.of_node;
+ u32 prop;
+
+ if (spicfg == NULL && np) {
+ spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
+ if (!spicfg)
+ return -ENOMEM;
+ *spicfg = davinci_spi_default_cfg;
+ /* override with dt configured values */
+ if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
+ spicfg->wdelay = (u8)prop;
+ spi->controller_data = spicfg;
+ }
+
+ return 0;
+}
+
/**
* davinci_spi_setup - This functions will set default transfer method
* @spi: spi device on which data transfer to be done
@@ -433,7 +468,16 @@ static int davinci_spi_setup(struct spi_device *spi)
else
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
- return retval;
+ return davinci_spi_of_setup(spi);
+}
+
+static void davinci_spi_cleanup(struct spi_device *spi)
+{
+ struct davinci_spi_config *spicfg = spi->controller_data;
+
+ spi->controller_data = NULL;
+ if (spi->dev.of_node)
+ kfree(spicfg);
}
static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
@@ -947,6 +991,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
master->num_chipselect = pdata->num_chipselect;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
master->setup = davinci_spi_setup;
+ master->cleanup = davinci_spi_cleanup;
dspi->bitbang.chipselect = davinci_spi_chipselect;
dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
@@ -996,8 +1041,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_clk;
dev_info(&pdev->dev, "DMA: supported\n");
- dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, "
- "event queue: %d\n", &dma_rx_chan, &dma_tx_chan,
+ dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n",
+ &dma_rx_chan, &dma_tx_chan,
pdata->dma_event_q);
}
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 6d207afec8cb..46c6d58e1fda 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -1,7 +1,7 @@
/*
* Special handling for DW core on Intel MID platform
*
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -11,10 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/dma-mapping.h>
@@ -39,22 +35,25 @@ static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
struct dw_spi *dws = param;
- return dws->dmac && (&dws->dmac->dev == chan->device->dev);
+ return dws->dma_dev == chan->device->dev;
}
static int mid_spi_dma_init(struct dw_spi *dws)
{
struct mid_dma *dw_dma = dws->dma_priv;
+ struct pci_dev *dma_dev;
struct intel_mid_dma_slave *rxs, *txs;
dma_cap_mask_t mask;
/*
* Get pci device for DMA controller, currently it could only
- * be the DMA controller of either Moorestown or Medfield
+ * be the DMA controller of Medfield
*/
- dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
- if (!dws->dmac)
- dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ if (!dma_dev)
+ return -ENODEV;
+
+ dws->dma_dev = &dma_dev->dev;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -83,13 +82,18 @@ static int mid_spi_dma_init(struct dw_spi *dws)
free_rxchan:
dma_release_channel(dws->rxchan);
err_exit:
- return -1;
-
+ return -EBUSY;
}
static void mid_spi_dma_exit(struct dw_spi *dws)
{
+ if (!dws->dma_inited)
+ return;
+
+ dmaengine_terminate_all(dws->txchan);
dma_release_channel(dws->txchan);
+
+ dmaengine_terminate_all(dws->rxchan);
dma_release_channel(dws->rxchan);
}
@@ -109,8 +113,7 @@ static void dw_spi_dma_done(void *arg)
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
- struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
- struct dma_chan *txchan, *rxchan;
+ struct dma_async_tx_descriptor *txdesc, *rxdesc;
struct dma_slave_config txconf, rxconf;
u16 dma_ctrl = 0;
@@ -120,37 +123,34 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
dw_writew(dws, DW_SPI_DMARDLR, 0xf);
dw_writew(dws, DW_SPI_DMATDLR, 0x10);
if (dws->tx_dma)
- dma_ctrl |= 0x2;
+ dma_ctrl |= SPI_DMA_TDMAE;
if (dws->rx_dma)
- dma_ctrl |= 0x1;
+ dma_ctrl |= SPI_DMA_RDMAE;
dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
spi_enable_chip(dws, 1);
}
dws->dma_chan_done = 0;
- txchan = dws->txchan;
- rxchan = dws->rxchan;
/* 2. Prepare the TX dma transfer */
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ txconf.dst_addr_width = dws->dma_width;
txconf.device_fc = false;
- txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
- (unsigned long) &txconf);
+ dmaengine_slave_config(dws->txchan, &txconf);
memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
dws->tx_sgl.dma_address = dws->tx_dma;
dws->tx_sgl.length = dws->len;
- txdesc = dmaengine_prep_slave_sg(txchan,
+ txdesc = dmaengine_prep_slave_sg(dws->txchan,
&dws->tx_sgl,
1,
DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
@@ -159,27 +159,30 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ rxconf.src_addr_width = dws->dma_width;
rxconf.device_fc = false;
- rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
- (unsigned long) &rxconf);
+ dmaengine_slave_config(dws->rxchan, &rxconf);
memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
dws->rx_sgl.dma_address = dws->rx_dma;
dws->rx_sgl.length = dws->len;
- rxdesc = dmaengine_prep_slave_sg(rxchan,
+ rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
&dws->rx_sgl,
1,
DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
/* rx must be started before tx due to spi instinct */
- rxdesc->tx_submit(rxdesc);
- txdesc->tx_submit(txdesc);
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dws->rxchan);
+
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(dws->txchan);
+
return 0;
}
@@ -190,7 +193,7 @@ static struct dw_spi_dma_ops mid_dma_ops = {
};
#endif
-/* Some specific info for SPI0 controller on Moorestown */
+/* Some specific info for SPI0 controller on Intel MID */
/* HW info for MRST Clk Control Unit, one 32b reg */
#define MRST_SPI_CLK_BASE 100000000 /* 100m */
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index e14960470d8d..ba68da12cdf0 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -1,7 +1,7 @@
/*
* PCI interface driver for DW SPI Core
*
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -11,10 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/interrupt.h>
@@ -32,17 +28,22 @@ struct dw_spi_pci {
struct dw_spi dws;
};
-static int spi_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+struct spi_pci_desc {
+ int (*setup)(struct dw_spi *);
+};
+
+static struct spi_pci_desc spi_pci_mid_desc = {
+ .setup = dw_spi_mid_init,
+};
+
+static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct dw_spi_pci *dwpci;
struct dw_spi *dws;
+ struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data;
int pci_bar = 0;
int ret;
- dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
- pdev->vendor, pdev->device);
-
ret = pcim_enable_device(pdev);
if (ret)
return ret;
@@ -58,7 +59,7 @@ static int spi_pci_probe(struct pci_dev *pdev,
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
- ret = pcim_iomap_regions(pdev, 1, dev_name(&pdev->dev));
+ ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev));
if (ret)
return ret;
@@ -69,11 +70,11 @@ static int spi_pci_probe(struct pci_dev *pdev,
dws->irq = pdev->irq;
/*
- * Specific handling for Intel MID paltforms, like dma setup,
+ * Specific handling for platforms, like dma setup,
* clock rate, FIFO depth.
*/
- if (pdev->device == 0x0800) {
- ret = dw_spi_mid_init(dws);
+ if (desc && desc->setup) {
+ ret = desc->setup(dws);
if (ret)
return ret;
}
@@ -85,6 +86,9 @@ static int spi_pci_probe(struct pci_dev *pdev,
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dwpci);
+ dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
+ pdev->vendor, pdev->device);
+
return 0;
}
@@ -95,41 +99,29 @@ static void spi_pci_remove(struct pci_dev *pdev)
dw_spi_remove_host(&dwpci->dws);
}
-#ifdef CONFIG_PM
-static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int spi_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
- int ret;
- ret = dw_spi_suspend_host(&dwpci->dws);
- if (ret)
- return ret;
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return ret;
+ return dw_spi_suspend_host(&dwpci->dws);
}
-static int spi_resume(struct pci_dev *pdev)
+static int spi_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
- int ret;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
return dw_spi_resume_host(&dwpci->dws);
}
-#else
-#define spi_suspend NULL
-#define spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
+
static const struct pci_device_id pci_ids[] = {
/* Intel MID platform SPI controller 0 */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
+ { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc},
{},
};
@@ -138,8 +130,9 @@ static struct pci_driver dw_spi_driver = {
.id_table = pci_ids,
.probe = spi_pci_probe,
.remove = spi_pci_remove,
- .suspend = spi_suspend,
- .resume = spi_resume,
+ .driver = {
+ .pm = &dw_spi_pm_ops,
+ },
};
module_pci_driver(dw_spi_driver);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 0dd0623319b0..729215885250 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -11,10 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/dma-mapping.h>
@@ -59,22 +55,20 @@ struct chip_data {
#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE 1024
-static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct dw_spi *dws;
+ struct dw_spi *dws = file->private_data;
char *buf;
u32 len = 0;
ssize_t ret;
- dws = file->private_data;
-
buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
if (!buf)
return 0;
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "MRST SPI0 registers:\n");
+ "%s registers:\n", dev_name(&dws->master->dev));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
@@ -110,42 +104,41 @@ static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
-static const struct file_operations mrst_spi_regs_ops = {
+static const struct file_operations dw_spi_regs_ops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = spi_show_regs,
+ .read = dw_spi_show_regs,
.llseek = default_llseek,
};
-static int mrst_spi_debugfs_init(struct dw_spi *dws)
+static int dw_spi_debugfs_init(struct dw_spi *dws)
{
- dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
+ dws->debugfs = debugfs_create_dir("dw_spi", NULL);
if (!dws->debugfs)
return -ENOMEM;
debugfs_create_file("registers", S_IFREG | S_IRUGO,
- dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
+ dws->debugfs, (void *)dws, &dw_spi_regs_ops);
return 0;
}
-static void mrst_spi_debugfs_remove(struct dw_spi *dws)
+static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
- if (dws->debugfs)
- debugfs_remove_recursive(dws->debugfs);
+ debugfs_remove_recursive(dws->debugfs);
}
#else
-static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
+static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
return 0;
}
-static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
+static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */
@@ -177,7 +170,7 @@ static inline u32 rx_max(struct dw_spi *dws)
{
u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
- return min(rx_left, (u32)dw_readw(dws, DW_SPI_RXFLR));
+ return min_t(u32, rx_left, dw_readw(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
@@ -228,8 +221,9 @@ static void *next_transfer(struct dw_spi *dws)
struct spi_transfer,
transfer_list);
return RUNNING_STATE;
- } else
- return DONE_STATE;
+ }
+
+ return DONE_STATE;
}
/*
@@ -396,7 +390,7 @@ static void pump_transfers(unsigned long data)
goto early_exit;
}
- /* Delay if requested at end of transfer*/
+ /* Delay if requested at end of transfer */
if (message->state == RUNNING_STATE) {
previous = list_entry(transfer->transfer_list.prev,
struct spi_transfer,
@@ -471,10 +465,12 @@ static void pump_transfers(unsigned long data)
*/
if (!dws->dma_mapped && !chip->poll_mode) {
int templen = dws->len / dws->n_bytes;
+
txint_level = dws->fifo_len / 2;
txint_level = (templen > txint_level) ? txint_level : templen;
- imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
+ imask |= SPI_INT_TXEI | SPI_INT_TXOI |
+ SPI_INT_RXUI | SPI_INT_RXOI;
dws->transfer_handler = interrupt_transfer;
}
@@ -515,7 +511,6 @@ static void pump_transfers(unsigned long data)
early_exit:
giveback(dws);
- return;
}
static int dw_spi_transfer_one_message(struct spi_master *master,
@@ -524,7 +519,7 @@ static int dw_spi_transfer_one_message(struct spi_master *master,
struct dw_spi *dws = spi_master_get_devdata(master);
dws->cur_msg = msg;
- /* Initial message state*/
+ /* Initial message state */
dws->cur_msg->state = START_STATE;
dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
struct spi_transfer,
@@ -595,6 +590,9 @@ static int dw_spi_setup(struct spi_device *spi)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
+ if (spi->mode & SPI_LOOP)
+ chip->cr0 |= 1 << SPI_SRL_OFFSET;
+
if (gpio_is_valid(spi->cs_gpio)) {
ret = gpio_direction_output(spi->cs_gpio,
!(spi->mode & SPI_CS_HIGH));
@@ -626,6 +624,7 @@ static void spi_hw_init(struct dw_spi *dws)
*/
if (!dws->fifo_len) {
u32 fifo;
+
for (fifo = 2; fifo <= 257; fifo++) {
dw_writew(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
@@ -653,8 +652,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->prev_chip = NULL;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
- snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
- dws->bus_num);
+ snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
dws->name, dws);
@@ -663,7 +661,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
goto err_free_master;
}
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
@@ -692,7 +690,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
goto err_dma_exit;
}
- mrst_spi_debugfs_init(dws);
+ dw_spi_debugfs_init(dws);
return 0;
err_dma_exit:
@@ -709,7 +707,7 @@ void dw_spi_remove_host(struct dw_spi *dws)
{
if (!dws)
return;
- mrst_spi_debugfs_remove(dws);
+ dw_spi_debugfs_remove(dws);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 6d2acad34f64..83a103a76481 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -74,6 +74,10 @@
#define SPI_INT_RXFI (1 << 4)
#define SPI_INT_MSTI (1 << 5)
+/* Bit fields in DMACR */
+#define SPI_DMA_RDMAE (1 << 0)
+#define SPI_DMA_TDMAE (1 << 1)
+
/* TX RX interrupt level threshold, max can be 256 */
#define SPI_INT_THRESHOLD 32
@@ -140,7 +144,6 @@ struct dw_spi {
dma_addr_t dma_addr; /* phy address of the Data register */
struct dw_spi_dma_ops *dma_ops;
void *dma_priv; /* platform relate info */
- struct pci_dev *dmac;
/* Bus interface info */
void *priv;
@@ -217,11 +220,11 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
 * Each SPI slave device working with the dw_api controller should
 * have such a structure claiming its working mode (PIO/DMA etc),
 * which can be saved in the "controller_data" member of the
- * struct spi_device
+ * struct spi_device.
*/
struct dw_spi_chip {
- u8 poll_mode; /* 0 for contoller polling mode */
- u8 type; /* SPI/SSP/Micrwire */
+ u8 poll_mode; /* 1 for controller polling mode */
+ u8 type; /* SPI/SSP/MicroWire */
u8 enable_dma;
void (*cs_control)(u32 command);
};
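As the comment above notes, a slave hands its working mode to the DW controller through controller_data. A hedged board-file sketch, assuming a platform where drivers/spi/spi-dw.h is reachable; the device values below are invented for illustration:

/* Illustrative only: example dw_spi_chip passed via spi_board_info. */
#include <linux/spi/spi.h>
#include "spi-dw.h"

static struct dw_spi_chip example_chip = {
	.poll_mode  = 1,	/* controller polling mode, no IRQs */
	.type       = 0,	/* plain SPI frame format */
	.enable_dma = 0,
};

static struct spi_board_info example_board_info = {
	.modalias        = "spidev",
	.max_speed_hz    = 1000000,
	.bus_num         = 0,
	.chip_select     = 0,
	.controller_data = &example_chip,
};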
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 2f675d32df0e..bf9728773247 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -266,6 +266,7 @@ static int ep93xx_spi_setup(struct spi_device *spi)
if (chip->ops && chip->ops->setup) {
int ret = chip->ops->setup(spi);
+
if (ret) {
kfree(chip);
return ret;
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 54b06376f03c..c5dd20beee22 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -15,17 +15,17 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/fsl_devices.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
#include <asm/cpm.h>
#include <asm/qe.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
-#include "spi-fsl-lib.h"
#include "spi-fsl-cpm.h"
+#include "spi-fsl-lib.h"
#include "spi-fsl-spi.h"
/* CPM1 and CPM2 are mutually exclusive. */
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 5021ddf03f60..448216025ce8 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -13,22 +13,22 @@
*
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#define DRIVER_NAME "fsl-dspi"
@@ -493,9 +493,6 @@ static int dspi_probe(struct platform_device *pdev)
}
dspi_regmap_config.lock_arg = dspi;
- dspi_regmap_config.val_format_endian =
- of_property_read_bool(np, "big-endian")
- ? REGMAP_ENDIAN_BIG : REGMAP_ENDIAN_DEFAULT;
dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
&dspi_regmap_config);
if (IS_ERR(dspi->regmap)) {
@@ -535,7 +532,6 @@ static int dspi_probe(struct platform_device *pdev)
goto out_clk_put;
}
- pr_info(KERN_INFO "Freescale DSPI master initialized\n");
return ret;
out_clk_put:
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 429e11190265..a7f94b6a9e70 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -8,19 +8,19 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/spi/spi.h>
-#include <linux/platform_device.h>
+#include <linux/err.h>
#include <linux/fsl_devices.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
#include <sysdev/fsl_soc.h>
#include "spi-fsl-lib.h"
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index e0b773fc29cb..5ddb5b098e4e 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -16,10 +16,10 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_platform.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 590f31bc0aba..ed792880c9d6 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -19,25 +19,25 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/platform_device.h>
-#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
#include "spi-fsl-lib.h"
#include "spi-fsl-cpm.h"
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 5daff2054ae4..3637847b5370 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -21,6 +21,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
@@ -37,6 +39,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>
#define DRIVER_NAME "spi_imx"
@@ -51,6 +54,9 @@
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+/* The maximum number of bytes that an SDMA BD can transfer. */
+#define MAX_SDMA_BD_BYTES (1 << 15)
+#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
struct spi_imx_config {
unsigned int speed_hz;
unsigned int bpw;
@@ -95,6 +101,16 @@ struct spi_imx_data {
const void *tx_buf;
unsigned int txfifo; /* number of words pushed in tx FIFO */
+ /* DMA */
+ unsigned int dma_is_inited;
+ unsigned int dma_finished;
+ bool usedma;
+ u32 rx_wml;
+ u32 tx_wml;
+ u32 rxt_wml;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
+
const struct spi_imx_devtype_data *devtype_data;
int chipselect[0];
};
@@ -181,9 +197,21 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
return 7;
}
+static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
+ && (transfer->len > spi_imx->tx_wml))
+ return true;
+ return false;
+}
+
#define MX51_ECSPI_CTRL 0x08
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
#define MX51_ECSPI_CTRL_XCH (1 << 2)
+#define MX51_ECSPI_CTRL_SMC (1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
@@ -201,6 +229,18 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)
+#define MX51_ECSPI_DMA 0x14
+#define MX51_ECSPI_DMA_TX_WML_OFFSET 0
+#define MX51_ECSPI_DMA_TX_WML_MASK 0x3F
+#define MX51_ECSPI_DMA_RX_WML_OFFSET 16
+#define MX51_ECSPI_DMA_RX_WML_MASK (0x3F << 16)
+#define MX51_ECSPI_DMA_RXT_WML_OFFSET 24
+#define MX51_ECSPI_DMA_RXT_WML_MASK (0x3F << 24)
+
+#define MX51_ECSPI_DMA_TEDEN_OFFSET 7
+#define MX51_ECSPI_DMA_RXDEN_OFFSET 23
+#define MX51_ECSPI_DMA_RXTDEN_OFFSET 31
+
#define MX51_ECSPI_STAT 0x18
#define MX51_ECSPI_STAT_RR (1 << 3)
@@ -257,17 +297,22 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int
static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
- u32 reg;
-
- reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
- reg |= MX51_ECSPI_CTRL_XCH;
+ u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+
+ if (!spi_imx->usedma)
+ reg |= MX51_ECSPI_CTRL_XCH;
+ else if (!spi_imx->dma_finished)
+ reg |= MX51_ECSPI_CTRL_SMC;
+ else
+ reg &= ~MX51_ECSPI_CTRL_SMC;
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
- u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
+ u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
u32 clk = config->speed_hz, delay;
/*
@@ -319,6 +364,30 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
else /* SCLK is _very_ slow */
usleep_range(delay, delay + 10);
+ /*
+ * Configure the DMA register: set up the watermarks
+ * and enable the DMA requests.
+ */
+ if (spi_imx->dma_is_inited) {
+ dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+
+ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
+ tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
+ rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
+ dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
+ & ~MX51_ECSPI_DMA_RX_WML_MASK
+ & ~MX51_ECSPI_DMA_RXT_WML_MASK)
+ | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
+ |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
+ |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
+ |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
+
+ writel(dma, spi_imx->base + MX51_ECSPI_DMA);
+ }
+
return 0;
}
@@ -730,7 +799,186 @@ static int spi_imx_setupxfer(struct spi_device *spi,
return 0;
}
-static int spi_imx_transfer(struct spi_device *spi,
+static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
+{
+ struct spi_master *master = spi_imx->bitbang.master;
+
+ if (master->dma_rx) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+
+ if (master->dma_tx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
+ spi_imx->dma_is_inited = 0;
+}
+
+static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
+ struct spi_master *master,
+ const struct resource *res)
+{
+ struct dma_slave_config slave_config = {};
+ int ret;
+
+ /* Prepare for TX DMA: */
+ master->dma_tx = dma_request_slave_channel(dev, "tx");
+ if (!master->dma_tx) {
+ dev_err(dev, "cannot get the TX DMA channel!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ slave_config.dst_addr = res->start + MXC_CSPITXDATA;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+ ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+ if (ret) {
+ dev_err(dev, "error in TX dma configuration.\n");
+ goto err;
+ }
+
+ /* Prepare for RX: */
+ master->dma_rx = dma_request_slave_channel(dev, "rx");
+ if (!master->dma_rx) {
+ dev_dbg(dev, "cannot get the DMA channel.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ slave_config.src_addr = res->start + MXC_CSPIRXDATA;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+ ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+ if (ret) {
+ dev_err(dev, "error in RX dma configuration.\n");
+ goto err;
+ }
+
+ init_completion(&spi_imx->dma_rx_completion);
+ init_completion(&spi_imx->dma_tx_completion);
+ master->can_dma = spi_imx_can_dma;
+ master->max_dma_len = MAX_SDMA_BD_BYTES;
+ spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
+ SPI_MASTER_MUST_TX;
+ spi_imx->dma_is_inited = 1;
+
+ return 0;
+err:
+ spi_imx_sdma_exit(spi_imx);
+ return ret;
+}
+
+static void spi_imx_dma_rx_callback(void *cookie)
+{
+ struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
+
+ complete(&spi_imx->dma_rx_completion);
+}
+
+static void spi_imx_dma_tx_callback(void *cookie)
+{
+ struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
+
+ complete(&spi_imx->dma_tx_completion);
+}
+
+static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
+ struct spi_transfer *transfer)
+{
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ int ret;
+ u32 dma;
+ int left;
+ struct spi_master *master = spi_imx->bitbang.master;
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+
+ if (tx) {
+ desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+ tx->sgl, tx->nents, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx)
+ goto no_dma;
+
+ desc_tx->callback = spi_imx_dma_tx_callback;
+ desc_tx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_tx);
+ }
+
+ if (rx) {
+ desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ rx->sgl, rx->nents, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ goto no_dma;
+
+ desc_rx->callback = spi_imx_dma_rx_callback;
+ desc_rx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_rx);
+ }
+
+ reinit_completion(&spi_imx->dma_rx_completion);
+ reinit_completion(&spi_imx->dma_tx_completion);
+
+ /* Trigger the cspi module. */
+ spi_imx->dma_finished = 0;
+
+ dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+ dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
+ /* Change RX_DMA_LENGTH to trigger a DMA fetch of the tail data */
+ left = transfer->len % spi_imx->rxt_wml;
+ if (left)
+ writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
+ spi_imx->base + MX51_ECSPI_DMA);
+ spi_imx->devtype_data->trigger(spi_imx);
+
+ dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(master->dma_rx);
+ /* Wait for SDMA to finish the data transfer. */
+ ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+ IMX_DMA_TIMEOUT);
+ if (!ret) {
+ pr_warn("%s %s: I/O Error in DMA TX\n",
+ dev_driver_string(&master->dev),
+ dev_name(&master->dev));
+ dmaengine_terminate_all(master->dma_tx);
+ } else {
+ ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
+ IMX_DMA_TIMEOUT);
+ if (!ret) {
+ pr_warn("%s %s: I/O Error in DMA RX\n",
+ dev_driver_string(&master->dev),
+ dev_name(&master->dev));
+ spi_imx->devtype_data->reset(spi_imx);
+ dmaengine_terminate_all(master->dma_rx);
+ }
+ writel(dma |
+ spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
+ spi_imx->base + MX51_ECSPI_DMA);
+ }
+
+ spi_imx->dma_finished = 1;
+ spi_imx->devtype_data->trigger(spi_imx);
+
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = transfer->len;
+
+ return ret;
+
+no_dma:
+ pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+ dev_driver_string(&master->dev),
+ dev_name(&master->dev));
+ return -EAGAIN;
+}
+
+static int spi_imx_pio_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
@@ -751,6 +999,24 @@ static int spi_imx_transfer(struct spi_device *spi,
return transfer->len;
}
+static int spi_imx_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ int ret;
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+
+ if (spi_imx->bitbang.master->can_dma &&
+ spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
+ spi_imx->usedma = true;
+ ret = spi_imx_dma_transfer(spi_imx, transfer);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+ spi_imx->usedma = false;
+
+ return spi_imx_pio_transfer(spi, transfer);
+}
+
static int spi_imx_setup(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
@@ -911,6 +1177,13 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_put_per;
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
+ /*
+ * Only validated on i.MX6 for now; the constraint can be removed once
+ * validated on other chips.
+ */
+ if (spi_imx->devtype_data == &imx51_ecspi_devtype_data
+ && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
+ dev_err(&pdev->dev, "dma setup error, use pio instead\n");
spi_imx->devtype_data->reset(spi_imx);
@@ -949,6 +1222,7 @@ static int spi_imx_remove(struct platform_device *pdev)
writel(0, spi_imx->base + MXC_CSPICTRL);
clk_unprepare(spi_imx->clk_ipg);
clk_unprepare(spi_imx->clk_per);
+ spi_imx_sdma_exit(spi_imx);
spi_master_put(master);
return 0;
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 2884f0c2f5f0..51460878af04 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -85,7 +85,7 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
mxs_ssp_set_clk_rate(ssp, hz);
/*
* Save requested rate, hz, rather than the actual rate,
- * ssp->clk_rate. Otherwise we would set the rate every trasfer
+ * ssp->clk_rate. Otherwise we would set the rate every transfer
* when the actual rate is not quite the same as requested rate.
*/
spi->sck = hz;
@@ -154,12 +154,14 @@ static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
static void mxs_ssp_dma_irq_callback(void *param)
{
struct mxs_spi *spi = param;
+
complete(&spi->c);
}
static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
struct mxs_ssp *ssp = dev_id;
+
dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
__func__, __LINE__,
readl(ssp->base + HW_SSP_CTRL1(ssp)),
@@ -189,7 +191,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
if (!len)
return -EINVAL;
- dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
+ dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
if (!dma_xfer)
return -ENOMEM;
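The kzalloc(n * size) to kcalloc(n, size) conversion above matters because kcalloc rejects multiplications that would overflow instead of silently allocating a too-small buffer. A minimal userspace analogue, with made-up sizes, where calloc's overflow check stands in for kcalloc's:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Reject n * size overflow before allocating, as kcalloc does. */
	static void *checked_alloc(size_t n, size_t size)
	{
		if (size && n > SIZE_MAX / size)
			return NULL;            /* n * size would overflow */
		return calloc(n, size);         /* calloc performs the same check */
	}

	int main(void)
	{
		void *ok = checked_alloc(16, 64);
		void *bad = checked_alloc((size_t)-1, 64);

		printf("ok=%p bad=%p\n", ok, bad);
		free(ok);
		return 0;
	}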
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 5e91858f6f01..fb522765ce5a 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -70,10 +70,6 @@
#define SPI_STATUS_WE (1UL << 1)
#define SPI_STATUS_RD (1UL << 0)
-#define WRITE 0
-#define READ 1
-
-
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index c4675fa8b645..835cdda6f4f5 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/sizes.h>
#include <asm/unaligned.h>
@@ -40,13 +41,27 @@
#define ORION_SPI_MODE_CPHA (1 << 12)
#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
+#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
ORION_SPI_MODE_CPHA)
+enum orion_spi_type {
+ ORION_SPI,
+ ARMADA_SPI,
+};
+
+struct orion_spi_dev {
+ enum orion_spi_type typ;
+ unsigned int min_divisor;
+ unsigned int max_divisor;
+ u32 prescale_mask;
+};
+
struct orion_spi {
struct spi_master *master;
void __iomem *base;
struct clk *clk;
+ const struct orion_spi_dev *devdata;
};
static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
@@ -83,30 +98,66 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
u32 prescale;
u32 reg;
struct orion_spi *orion_spi;
+ const struct orion_spi_dev *devdata;
orion_spi = spi_master_get_devdata(spi->master);
+ devdata = orion_spi->devdata;
tclk_hz = clk_get_rate(orion_spi->clk);
- /*
- * the supported rates are: 4,6,8...30
- * round up as we look for equal or less speed
- */
- rate = DIV_ROUND_UP(tclk_hz, speed);
- rate = roundup(rate, 2);
+ if (devdata->typ == ARMADA_SPI) {
+ unsigned int clk, spr, sppr, sppr2, err;
+ unsigned int best_spr, best_sppr, best_err;
- /* check if requested speed is too small */
- if (rate > 30)
- return -EINVAL;
+ best_err = speed;
+ best_spr = 0;
+ best_sppr = 0;
- if (rate < 4)
- rate = 4;
+ /* Iterate over the valid range looking for best fit */
+ for (sppr = 0; sppr < 8; sppr++) {
+ sppr2 = 0x1 << sppr;
+
+ spr = tclk_hz / sppr2;
+ spr = DIV_ROUND_UP(spr, speed);
+ if ((spr == 0) || (spr > 15))
+ continue;
+
+ clk = tclk_hz / (spr * sppr2);
+ err = speed - clk;
+
+ if (err < best_err) {
+ best_spr = spr;
+ best_sppr = sppr;
+ best_err = err;
+ }
+ }
- /* Convert the rate to SPI clock divisor value. */
- prescale = 0x10 + rate/2;
+ if ((best_sppr == 0) && (best_spr == 0))
+ return -EINVAL;
+
+ prescale = ((best_sppr & 0x6) << 5) |
+ ((best_sppr & 0x1) << 4) | best_spr;
+ } else {
+ /*
+ * the supported rates are: 4,6,8...30
+ * round up as we look for equal or less speed
+ */
+ rate = DIV_ROUND_UP(tclk_hz, speed);
+ rate = roundup(rate, 2);
+
+ /* check if requested speed is too small */
+ if (rate > 30)
+ return -EINVAL;
+
+ if (rate < 4)
+ rate = 4;
+
+ /* Convert the rate to SPI clock divisor value. */
+ prescale = 0x10 + rate/2;
+ }
reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
- reg = ((reg & ~ORION_SPI_CLK_PRESCALE_MASK) | prescale);
+ reg = ((reg & ~devdata->prescale_mask) | prescale);
writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
return 0;
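The Armada branch above searches the (SPR, SPPR) space for the divisor pair that gets closest to, without exceeding, the requested rate, then packs SPPR[2:1] into bits 7:6, SPPR[0] into bit 4 and SPR into bits 3:0. A standalone sketch of the same search, with assumed clock values (250 MHz TCLK, 10 MHz requested), not a definitive restatement of the driver:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int tclk_hz = 250000000, speed = 10000000; /* assumed */
		unsigned int best_err = speed, best_spr = 0, best_sppr = 0;
		unsigned int sppr, spr, clk, err, prescale;

		for (sppr = 0; sppr < 8; sppr++) {
			unsigned int sppr2 = 1u << sppr;

			spr = DIV_ROUND_UP(tclk_hz / sppr2, speed);
			if (spr == 0 || spr > 15)
				continue;

			clk = tclk_hz / (spr * sppr2);
			err = speed - clk;
			if (err < best_err) {
				best_spr = spr;
				best_sppr = sppr;
				best_err = err;
			}
		}

		if (best_sppr == 0 && best_spr == 0) {
			printf("no usable divisor\n");
			return 1;
		}

		/* SPPR[2:1] -> bits 7:6, SPPR[0] -> bit 4, SPR -> bits 3:0 */
		prescale = ((best_sppr & 0x6) << 5) |
			   ((best_sppr & 0x1) << 4) | best_spr;
		printf("spr=%u sppr=%u prescale=0x%02x -> %u Hz\n",
		       best_spr, best_sppr, prescale,
		       tclk_hz / (best_spr * (1u << best_sppr)));
		return 0;
	}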
@@ -179,8 +230,8 @@ static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) {
if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG)))
return 1;
- else
- udelay(1);
+
+ udelay(1);
}
return -1;
@@ -342,8 +393,31 @@ static int orion_spi_reset(struct orion_spi *orion_spi)
return 0;
}
+static const struct orion_spi_dev orion_spi_dev_data = {
+ .typ = ORION_SPI,
+ .min_divisor = 4,
+ .max_divisor = 30,
+ .prescale_mask = ORION_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .min_divisor = 1,
+ .max_divisor = 1920,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct of_device_id orion_spi_of_match_table[] = {
+ { .compatible = "marvell,orion-spi", .data = &orion_spi_dev_data, },
+ { .compatible = "marvell,armada-370-spi", .data = &armada_spi_dev_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
+
static int orion_spi_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
+ const struct orion_spi_dev *devdata;
struct spi_master *master;
struct orion_spi *spi;
struct resource *r;
@@ -360,6 +434,7 @@ static int orion_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
if (pdev->dev.of_node) {
u32 cell_index;
+
if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
&cell_index))
master->bus_num = cell_index;
@@ -378,6 +453,10 @@ static int orion_spi_probe(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
spi->master = master;
+ of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
+ devdata = of_id->data;
+ spi->devdata = devdata;
+
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
status = PTR_ERR(spi->clk);
@@ -389,8 +468,8 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out;
tclk_hz = clk_get_rate(spi->clk);
- master->max_speed_hz = DIV_ROUND_UP(tclk_hz, 4);
- master->min_speed_hz = DIV_ROUND_UP(tclk_hz, 30);
+ master->max_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
+ master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(&pdev->dev, r);
@@ -469,12 +548,6 @@ static const struct dev_pm_ops orion_spi_pm_ops = {
NULL)
};
-static const struct of_device_id orion_spi_of_match_table[] = {
- { .compatible = "marvell,orion-spi", },
- {}
-};
-MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
-
static struct platform_driver orion_spi_driver = {
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f1f0a587e4fc..f35f723816ea 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -82,6 +82,7 @@
#define SSP_MIS(r) (r + 0x01C)
#define SSP_ICR(r) (r + 0x020)
#define SSP_DMACR(r) (r + 0x024)
+#define SSP_CSR(r) (r + 0x030) /* vendor extension */
#define SSP_ITCR(r) (r + 0x080)
#define SSP_ITIP(r) (r + 0x084)
#define SSP_ITOP(r) (r + 0x088)
@@ -198,6 +199,12 @@
#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
/*
+ * SSP Chip Select Control Register - SSP_CSR
+ * (vendor extension)
+ */
+#define SSP_CSR_CSVALUE_MASK (0x1FUL << 0)
+
+/*
* SSP Integration Test control Register - SSP_ITCR
*/
#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
@@ -313,6 +320,7 @@ enum ssp_writing {
* @extended_cr: 32 bit wide control register 0 with extra
* features and extra features in CR1 as found in the ST variants
* @pl023: supports a subset of the ST extensions called "PL023"
+ * @internal_cs_ctrl: supports chip select control register
*/
struct vendor_data {
int fifodepth;
@@ -321,6 +329,7 @@ struct vendor_data {
bool extended_cr;
bool pl023;
bool loopback;
+ bool internal_cs_ctrl;
};
/**
@@ -440,9 +449,32 @@ static void null_cs_control(u32 command)
pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
}
+/**
+ * internal_cs_control - Control chip select signals via SSP_CSR.
+ * @pl022: SSP driver private data structure
+ * @command: select/deselect the chip
+ *
+ * Used on controllers with internal chip select control via the SSP_CSR
+ * register (vendor extension). Each of the 5 LSBs in the register controls
+ * one chip select signal.
+ */
+static void internal_cs_control(struct pl022 *pl022, u32 command)
+{
+ u32 tmp;
+
+ tmp = readw(SSP_CSR(pl022->virtbase));
+ if (command == SSP_CHIP_SELECT)
+ tmp &= ~BIT(pl022->cur_cs);
+ else
+ tmp |= BIT(pl022->cur_cs);
+ writew(tmp, SSP_CSR(pl022->virtbase));
+}
+
static void pl022_cs_control(struct pl022 *pl022, u32 command)
{
- if (gpio_is_valid(pl022->cur_cs))
+ if (pl022->vendor->internal_cs_ctrl)
+ internal_cs_control(pl022, command);
+ else if (gpio_is_valid(pl022->cur_cs))
gpio_set_value(pl022->cur_cs, command);
else
pl022->cur_chip->cs_control(command);
@@ -2100,6 +2132,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
pl022->vendor = id->data;
pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
GFP_KERNEL);
+ if (!pl022->chipselects) {
+ status = -ENOMEM;
+ goto err_no_mem;
+ }
/*
* Bus Number Which has been Assigned to this SSP controller
@@ -2118,6 +2154,9 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
if (platform_info->num_chipselect && platform_info->chipselects) {
for (i = 0; i < num_cs; i++)
pl022->chipselects[i] = platform_info->chipselects[i];
+ } else if (pl022->vendor->internal_cs_ctrl) {
+ for (i = 0; i < num_cs; i++)
+ pl022->chipselects[i] = i;
} else if (IS_ENABLED(CONFIG_OF)) {
for (i = 0; i < num_cs; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -2241,6 +2280,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
amba_release_regions(adev);
err_no_ioregion:
err_no_gpio:
+ err_no_mem:
spi_master_put(master);
return status;
}
@@ -2347,6 +2387,7 @@ static struct vendor_data vendor_arm = {
.extended_cr = false,
.pl023 = false,
.loopback = true,
+ .internal_cs_ctrl = false,
};
static struct vendor_data vendor_st = {
@@ -2356,6 +2397,7 @@ static struct vendor_data vendor_st = {
.extended_cr = true,
.pl023 = false,
.loopback = true,
+ .internal_cs_ctrl = false,
};
static struct vendor_data vendor_st_pl023 = {
@@ -2365,6 +2407,17 @@ static struct vendor_data vendor_st_pl023 = {
.extended_cr = true,
.pl023 = true,
.loopback = false,
+ .internal_cs_ctrl = false,
+};
+
+static struct vendor_data vendor_lsi = {
+ .fifodepth = 8,
+ .max_bpw = 16,
+ .unidir = false,
+ .extended_cr = false,
+ .pl023 = false,
+ .loopback = true,
+ .internal_cs_ctrl = true,
};
static struct amba_id pl022_ids[] = {
@@ -2398,6 +2451,15 @@ static struct amba_id pl022_ids[] = {
.mask = 0xffffffff,
.data = &vendor_st_pl023,
},
+ {
+ /*
+ * PL022 variant that has a chip select control register which
+ * allows control of 5 output signals nCS[0:4].
+ */
+ .id = 0x000b6022,
+ .mask = 0x000fffff,
+ .data = &vendor_lsi,
+ },
{ 0, 0 },
};
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index c1865c92ccb9..536c863bebf1 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -7,6 +7,8 @@
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
enum {
PORT_CE4100,
@@ -21,6 +23,7 @@ struct pxa_spi_info {
int tx_chan_id;
int rx_slave_id;
int rx_chan_id;
+ unsigned long max_clk_rate;
};
static struct pxa_spi_info spi_info_configs[] = {
@@ -32,6 +35,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.tx_chan_id = -1,
.rx_slave_id = -1,
.rx_chan_id = -1,
+ .max_clk_rate = 3686400,
},
[PORT_BYT] = {
.type = LPSS_SSP,
@@ -41,6 +45,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.tx_chan_id = 0,
.rx_slave_id = 1,
.rx_chan_id = 1,
+ .max_clk_rate = 50000000,
},
};
@@ -53,6 +58,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
struct pxa2xx_spi_master spi_pdata;
struct ssp_device *ssp;
struct pxa_spi_info *c;
+ char buf[40];
ret = pcim_enable_device(dev);
if (ret)
@@ -84,6 +90,12 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
ssp->type = c->type;
+ snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
+ ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL,
+ CLK_IS_ROOT, c->max_clk_rate);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
memset(&pi, 0, sizeof(pi));
pi.parent = &dev->dev;
pi.name = "pxa2xx-spi";
@@ -92,8 +104,10 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
pi.size_data = sizeof(spi_pdata);
pdev = platform_device_register_full(&pi);
- if (IS_ERR(pdev))
+ if (IS_ERR(pdev)) {
+ clk_unregister(ssp->clk);
return PTR_ERR(pdev);
+ }
pci_set_drvdata(dev, pdev);
@@ -103,8 +117,12 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
struct platform_device *pdev = pci_get_drvdata(dev);
+ struct pxa2xx_spi_master *spi_pdata;
+
+ spi_pdata = dev_get_platdata(&pdev->dev);
platform_device_unregister(pdev);
+ clk_unregister(spi_pdata->ssp.clk);
}
static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 3afc266b666d..f96ea8a38d64 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -415,7 +415,7 @@ static void rockchip_spi_dma_txcb(void *data)
spin_unlock_irqrestore(&rs->lock, flags);
}
-static int rockchip_spi_dma_transfer(struct rockchip_spi *rs)
+static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
unsigned long flags;
struct dma_slave_config rxconf, txconf;
@@ -474,8 +474,6 @@ static int rockchip_spi_dma_transfer(struct rockchip_spi *rs)
dmaengine_submit(txdesc);
dma_async_issue_pending(rs->dma_tx.ch);
}
-
- return 1;
}
static void rockchip_spi_config(struct rockchip_spi *rs)
@@ -557,16 +555,17 @@ static int rockchip_spi_transfer_one(
else if (rs->rx)
rs->tmode = CR0_XFM_RO;
- if (master->can_dma && master->can_dma(master, spi, xfer))
+ /* we need to prepare dma before spi is enabled */
+ if (master->can_dma && master->can_dma(master, spi, xfer)) {
rs->use_dma = 1;
- else
+ rockchip_spi_prepare_dma(rs);
+ } else {
rs->use_dma = 0;
+ }
rockchip_spi_config(rs);
- if (rs->use_dma)
- ret = rockchip_spi_dma_transfer(rs);
- else
+ if (!rs->use_dma)
ret = rockchip_spi_pio_transfer(rs);
return ret;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index ad87a98f8f68..54bb0faec155 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -87,7 +87,7 @@
/* RSPI on SH only */
#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
-/* QSPI on R-Car M2 only */
+/* QSPI on R-Car Gen2 only */
#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
@@ -909,20 +909,24 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- chan = dma_request_channel(mask, shdma_chan_filter,
- (void *)(unsigned long)id);
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)id, dev,
+ dir == DMA_MEM_TO_DEV ? "tx" : "rx");
if (!chan) {
- dev_warn(dev, "dma_request_channel failed\n");
+ dev_warn(dev, "dma_request_slave_channel_compat failed\n");
return NULL;
}
memset(&cfg, 0, sizeof(cfg));
cfg.slave_id = id;
cfg.direction = dir;
- if (dir == DMA_MEM_TO_DEV)
+ if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
- else
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else {
cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
@@ -938,22 +942,30 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master,
const struct resource *res)
{
const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
+ unsigned int dma_tx_id, dma_rx_id;
+
+ if (dev->of_node) {
+ /* In the OF case we will get the slave IDs from the DT */
+ dma_tx_id = 0;
+ dma_rx_id = 0;
+ } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
+ dma_tx_id = rspi_pd->dma_tx_id;
+ dma_rx_id = rspi_pd->dma_rx_id;
+ } else {
+ /* The driver assumes no error. */
+ return 0;
+ }
- if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id)
- return 0; /* The driver assumes no error. */
-
- master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM,
- rspi_pd->dma_rx_id,
+ master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
res->start + RSPI_SPDR);
- if (!master->dma_rx)
+ if (!master->dma_tx)
return -ENODEV;
- master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV,
- rspi_pd->dma_tx_id,
+ master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
res->start + RSPI_SPDR);
- if (!master->dma_tx) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (!master->dma_rx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
return -ENODEV;
}
@@ -1046,12 +1058,11 @@ static int rspi_request_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, const char *suffix,
void *dev_id)
{
- const char *base = dev_name(dev);
- size_t len = strlen(base) + strlen(suffix) + 2;
- char *name = devm_kzalloc(dev, len, GFP_KERNEL);
+ const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+ dev_name(dev), suffix);
if (!name)
return -ENOMEM;
- snprintf(name, len, "%s:%s", base, suffix);
+
return devm_request_irq(dev, irq, handler, 0, name, dev_id);
}
@@ -1084,7 +1095,7 @@ static int rspi_probe(struct platform_device *pdev)
master->num_chipselect = rspi_pd->num_chipselect;
else
master->num_chipselect = 2; /* default */
- };
+ }
/* ops parameter check */
if (!ops->set_config_register) {
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 543075b80f16..3f365402fcc0 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -642,18 +642,14 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
p->rx_dma_addr, len, DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_rx) {
- ret = -EAGAIN;
- goto no_dma_rx;
- }
+ if (!desc_rx)
+ return -EAGAIN;
desc_rx->callback = sh_msiof_dma_complete;
desc_rx->callback_param = p;
cookie = dmaengine_submit(desc_rx);
- if (dma_submit_error(cookie)) {
- ret = cookie;
- goto no_dma_rx;
- }
+ if (dma_submit_error(cookie))
+ return cookie;
}
if (tx) {
@@ -738,7 +734,6 @@ no_dma_tx:
if (rx)
dmaengine_terminate_all(p->master->dma_rx);
sh_msiof_write(p, IER, 0);
-no_dma_rx:
return ret;
}
@@ -933,6 +928,9 @@ static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
@@ -977,20 +975,24 @@ static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- chan = dma_request_channel(mask, shdma_chan_filter,
- (void *)(unsigned long)id);
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)id, dev,
+ dir == DMA_MEM_TO_DEV ? "tx" : "rx");
if (!chan) {
- dev_warn(dev, "dma_request_channel failed\n");
+ dev_warn(dev, "dma_request_slave_channel_compat failed\n");
return NULL;
}
memset(&cfg, 0, sizeof(cfg));
cfg.slave_id = id;
cfg.direction = dir;
- if (dir == DMA_MEM_TO_DEV)
+ if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
- else
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ } else {
cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
@@ -1007,12 +1009,22 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
struct platform_device *pdev = p->pdev;
struct device *dev = &pdev->dev;
const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
+ unsigned int dma_tx_id, dma_rx_id;
const struct resource *res;
struct spi_master *master;
struct device *tx_dev, *rx_dev;
- if (!info || !info->dma_tx_id || !info->dma_rx_id)
- return 0; /* The driver assumes no error */
+ if (dev->of_node) {
+ /* In the OF case we will get the slave IDs from the DT */
+ dma_tx_id = 0;
+ dma_rx_id = 0;
+ } else if (info && info->dma_tx_id && info->dma_rx_id) {
+ dma_tx_id = info->dma_tx_id;
+ dma_rx_id = info->dma_rx_id;
+ } else {
+ /* The driver assumes no error */
+ return 0;
+ }
/* The DMA engine uses the second register set, if present */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1021,13 +1033,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
master = p->master;
master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
- info->dma_tx_id,
+ dma_tx_id,
res->start + TFDR);
if (!master->dma_tx)
return -ENODEV;
master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
- info->dma_rx_id,
+ dma_rx_id,
res->start + RFDR);
if (!master->dma_rx)
goto free_tx_chan;
@@ -1210,6 +1222,9 @@ static struct platform_device_id spi_driver_ids[] = {
{ "spi_sh_msiof", (kernel_ulong_t)&sh_data },
{ "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
{ "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7792_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7793_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7794_msiof", (kernel_ulong_t)&r8a779x_data },
{},
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 6f0602fd7401..39e2c0a55a28 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -62,15 +62,15 @@
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
-#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
-#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
-#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
+#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
+#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
+#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
/* Interrupt Enable */
-#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
-#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
-#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
-#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
+#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
+#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
+#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
+#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
@@ -79,7 +79,7 @@
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
-#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
+#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE BIT(0)
@@ -170,8 +170,7 @@ struct sirfsoc_spi {
* command model
*/
bool tx_by_cmd;
-
- int chipselect[0];
+ bool hw_cs;
};
static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
@@ -304,7 +303,7 @@ static void spi_sirfsoc_dma_fini_callback(void *data)
complete(dma_complete);
}
-static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
+static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct sirfsoc_spi *sspi;
@@ -328,10 +327,9 @@ static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
sspi->base + SIRFSOC_SPI_TX_RX_EN);
if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
dev_err(&spi->dev, "cmd transfer timeout\n");
- return 0;
+ return;
}
-
- return t->len;
+ sspi->left_rx_word -= t->len;
}
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
@@ -487,7 +485,7 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
- if (sspi->chipselect[spi->chip_select] == 0) {
+ if (sspi->hw_cs) {
u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
switch (value) {
case BITBANG_CS_ACTIVE:
@@ -505,14 +503,13 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
}
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
} else {
- int gpio = sspi->chipselect[spi->chip_select];
switch (value) {
case BITBANG_CS_ACTIVE:
- gpio_direction_output(gpio,
+ gpio_direction_output(spi->cs_gpio,
spi->mode & SPI_CS_HIGH ? 1 : 0);
break;
case BITBANG_CS_INACTIVE:
- gpio_direction_output(gpio,
+ gpio_direction_output(spi->cs_gpio,
spi->mode & SPI_CS_HIGH ? 0 : 1);
break;
}
@@ -606,8 +603,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
sspi->tx_by_cmd = false;
}
/*
- * set spi controller in RISC chipselect mode, we are controlling CS by
- * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
+ * It should never be set to hardware cs mode because in hardware cs mode
+ * the cs signal cannot be controlled by the driver.
*/
regval |= SIRFSOC_SPI_CS_IO_MODE;
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
@@ -630,9 +627,17 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static int spi_sirfsoc_setup(struct spi_device *spi)
{
+ struct sirfsoc_spi *sspi;
+
if (!spi->max_speed_hz)
return -EINVAL;
+ sspi = spi_master_get_devdata(spi->master);
+
+ if (spi->cs_gpio == -ENOENT)
+ sspi->hw_cs = true;
+ else
+ sspi->hw_cs = false;
return spi_sirfsoc_setup_transfer(spi, NULL);
}
@@ -641,19 +646,10 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct sirfsoc_spi *sspi;
struct spi_master *master;
struct resource *mem_res;
- int num_cs, cs_gpio, irq;
- int i;
- int ret;
-
- ret = of_property_read_u32(pdev->dev.of_node,
- "sirf,spi-num-chipselects", &num_cs);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get chip select number\n");
- goto err_cs;
- }
+ int irq;
+ int i, ret;
- master = spi_alloc_master(&pdev->dev,
- sizeof(*sspi) + sizeof(int) * num_cs);
+ master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
return -ENOMEM;
@@ -661,32 +657,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
- master->num_chipselect = num_cs;
-
- for (i = 0; i < master->num_chipselect; i++) {
- cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i);
- if (cs_gpio < 0) {
- dev_err(&pdev->dev, "can't get cs gpio from DT\n");
- ret = -ENODEV;
- goto free_master;
- }
-
- sspi->chipselect[i] = cs_gpio;
- if (cs_gpio == 0)
- continue; /* use cs from spi controller */
-
- ret = gpio_request(cs_gpio, DRIVER_NAME);
- if (ret) {
- while (i > 0) {
- i--;
- if (sspi->chipselect[i] > 0)
- gpio_free(sspi->chipselect[i]);
- }
- dev_err(&pdev->dev, "fail to request cs gpios\n");
- goto free_master;
- }
- }
-
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sspi->base)) {
@@ -756,7 +726,21 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
ret = spi_bitbang_start(&sspi->bitbang);
if (ret)
goto free_dummypage;
-
+ for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
+ if (master->cs_gpios[i] == -ENOENT)
+ continue;
+ if (!gpio_is_valid(master->cs_gpios[i])) {
+ dev_err(&pdev->dev, "no valid gpio\n");
+ ret = -EINVAL;
+ goto free_dummypage;
+ }
+ ret = devm_gpio_request(&pdev->dev,
+ master->cs_gpios[i], DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request gpio\n");
+ goto free_dummypage;
+ }
+ }
dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
return 0;
@@ -771,7 +755,7 @@ free_rx_dma:
dma_release_channel(sspi->rx_chan);
free_master:
spi_master_put(master);
-err_cs:
+
return ret;
}
@@ -779,16 +763,11 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct sirfsoc_spi *sspi;
- int i;
master = platform_get_drvdata(pdev);
sspi = spi_master_get_devdata(master);
spi_bitbang_stop(&sspi->bitbang);
- for (i = 0; i < master->num_chipselect; i++) {
- if (sspi->chipselect[i] > 0)
- gpio_free(sspi->chipselect[i]);
- }
kfree(sspi->dummypage);
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index e4a85ada861d..795bcbc0131b 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -302,6 +302,7 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
+
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
@@ -312,6 +313,7 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
+
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
x |= (u32)(*tx_buf++) << (i * 8);
@@ -338,6 +340,7 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
@@ -345,8 +348,10 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
read_words += tspi->curr_dma_words;
} else {
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
+
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -365,6 +370,7 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
if (tspi->is_packed) {
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
@@ -374,6 +380,7 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = 0;
+
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
x |= (u32)(*tx_buf++) << (i * 8);
@@ -396,6 +403,7 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
if (tspi->is_packed) {
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
} else {
unsigned int i;
@@ -405,6 +413,7 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = tspi->rx_dma_buf[count] & rx_mask;
+
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 3548ce25c08f..cd66fe7b78a9 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -99,7 +99,7 @@
#define SPI_TX_TRIG_MASK (0x3 << 16)
#define SPI_TX_TRIG_1W (0x0 << 16)
#define SPI_TX_TRIG_4W (0x1 << 16)
-#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF);
+#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF)
#define SPI_TX_FIFO 0x10
#define SPI_RX_FIFO 0x20
@@ -221,6 +221,7 @@ static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
while (!(status & SPI_RXF_EMPTY)) {
int i;
u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+
for (i = 0; (i < tsd->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
read_words++;
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 5f183baa91a9..2501a8373e89 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -97,6 +97,7 @@ static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
int on, unsigned int cs_delay)
{
int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
+
if (on) {
/* deselect the chip with cs_change hint in last transfer */
if (c->last_chipselect >= 0)
@@ -188,6 +189,7 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
if (prev_speed_hz != speed_hz
|| prev_bits_per_word != bits_per_word) {
int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1;
+
n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);
/* enter config mode */
txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 4d8efb16573d..79bd84f43430 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -471,7 +471,6 @@ static struct platform_driver xilinx_spi_driver = {
.remove = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
- .owner = THIS_MODULE,
.of_match_table = xilinx_spi_of_match,
},
};
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 41e158187f9d..0dc5df5233a9 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -46,6 +46,7 @@ static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
{
unsigned i;
+
for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) &&
i < BUSY_WAIT_US; ++i)
udelay(1);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ca935df80c88..e19512ffc40e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -552,6 +552,9 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
struct boardinfo *bi;
int i;
+ if (!n)
+ return -EINVAL;
+
bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
if (!bi)
return -ENOMEM;
@@ -789,27 +792,35 @@ static int spi_transfer_one_message(struct spi_master *master,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
- reinit_completion(&master->xfer_completion);
+ if (xfer->tx_buf || xfer->rx_buf) {
+ reinit_completion(&master->xfer_completion);
- ret = master->transfer_one(master, msg->spi, xfer);
- if (ret < 0) {
- dev_err(&msg->spi->dev,
- "SPI transfer failed: %d\n", ret);
- goto out;
- }
+ ret = master->transfer_one(master, msg->spi, xfer);
+ if (ret < 0) {
+ dev_err(&msg->spi->dev,
+ "SPI transfer failed: %d\n", ret);
+ goto out;
+ }
- if (ret > 0) {
- ret = 0;
- ms = xfer->len * 8 * 1000 / xfer->speed_hz;
- ms += ms + 100; /* some tolerance */
+ if (ret > 0) {
+ ret = 0;
+ ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+ ms += ms + 100; /* some tolerance */
- ms = wait_for_completion_timeout(&master->xfer_completion,
- msecs_to_jiffies(ms));
- }
+ ms = wait_for_completion_timeout(&master->xfer_completion,
+ msecs_to_jiffies(ms));
+ }
- if (ms == 0) {
- dev_err(&msg->spi->dev, "SPI transfer timed out\n");
- msg->status = -ETIMEDOUT;
+ if (ms == 0) {
+ dev_err(&msg->spi->dev,
+ "SPI transfer timed out\n");
+ msg->status = -ETIMEDOUT;
+ }
+ } else {
+ if (xfer->len)
+ dev_err(&msg->spi->dev,
+ "Bufferless transfer has length %u\n",
+ xfer->len);
}
trace_spi_transfer_stop(msg, xfer);
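The timeout used when transfer_one() returns a positive value is the nominal transfer time in milliseconds, doubled, plus 100 ms of slack. A worked example with assumed numbers (4096 bytes at 1 MHz), not taken from any particular transfer:

	#include <stdio.h>

	int main(void)
	{
		unsigned long len = 4096, speed_hz = 1000000; /* assumed */
		unsigned long ms;

		ms = len * 8 * 1000 / speed_hz;	/* nominal transfer time: 32 ms */
		ms += ms + 100;			/* double it and add 100 ms slack: 164 ms */

		printf("wait up to %lu ms for the transfer to complete\n", ms);
		return 0;
	}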
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 1d42dba6121d..bd672948f2f1 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -3587,7 +3587,7 @@ static void __used s8250_options(void)
#ifdef CONFIG_SERIAL_8250_RSA
__module_param_call(MODULE_PARAM_PREFIX, probe_rsa,
&param_array_ops, .arr = &__param_arr_probe_rsa,
- 0444, -1);
+ 0444, -1, 0);
#endif
}
#else
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index fa038a2e30b2..a8a30b1d4167 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -131,7 +131,7 @@ config XPS_USB_HCD_XILINX
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
---help---
- Xilinx xps USB host controller core is EHCI compilant and has
+ Xilinx xps USB host controller core is EHCI compliant and has
transaction translator built-in. It can be configured to either
support both high speed and full speed devices, or high speed
devices only.
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index d1dbe8833b4a..89b24349269e 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -818,6 +818,13 @@ static struct scsi_host_template uas_host_template = {
.cmd_per_lun = 1, /* until we override it */
.skip_settle_delay = 1,
.ordered_tag = 1,
+
+ /*
+ * The uas driver expects tags not to be bigger than the maximum
+ * per-device queue depth, which is not true with the blk-mq tag
+ * allocator.
+ */
+ .disable_blk_mq = true,
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index ef9058138dff..9d66ce62542e 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -990,6 +990,14 @@ int usb_stor_probe2(struct us_data *us)
if (!(us->fflags & US_FL_SCM_MULT_TARG))
us_to_host(us)->max_id = 1;
+ /*
+ * Like Windows, we won't store the LUN bits in CDB[1] for SCSI-2
+ * devices using the Bulk-Only transport (even though this violates
+ * the SCSI spec).
+ */
+ if (us->transport == usb_stor_Bulk_transport)
+ us_to_host(us)->no_scsi2_lun_in_cdb = 1;
+
/* Find the endpoints and calculate pipe values */
result = get_pipes(us);
if (result)
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 7db5234462d0..a6f7cc0a0883 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -216,7 +216,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
data->reg_duty_cycle = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "always on");
if (!res) {
- dev_err(&pdev->dev, "No REG resorce for always on\n");
+ dev_err(&pdev->dev, "No REG resource for always on\n");
return -ENXIO;
}
data->reg_always_on = res->start;